]> Git Repo - linux.git/blob - drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drm/amdgpu/display: re-enable freesync video patches
[linux.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
39
40 #include "vid.h"
41 #include "amdgpu.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
44 #include "atom.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
49 #endif
50 #include "amdgpu_pm.h"
51
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
58 #endif
59
60 #include "ivsrcid/ivsrcid_vislands30.h"
61
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107
108 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
135 {
136         switch (link->dpcd_caps.dongle_type) {
137         case DISPLAY_DONGLE_NONE:
138                 return DRM_MODE_SUBCONNECTOR_Native;
139         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140                 return DRM_MODE_SUBCONNECTOR_VGA;
141         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142         case DISPLAY_DONGLE_DP_DVI_DONGLE:
143                 return DRM_MODE_SUBCONNECTOR_DVID;
144         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146                 return DRM_MODE_SUBCONNECTOR_HDMIA;
147         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
148         default:
149                 return DRM_MODE_SUBCONNECTOR_Unknown;
150         }
151 }
152
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
154 {
155         struct dc_link *link = aconnector->dc_link;
156         struct drm_connector *connector = &aconnector->base;
157         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
158
159         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
160                 return;
161
162         if (aconnector->dc_sink)
163                 subconnector = get_subconnector_type(link);
164
165         drm_object_property_set_value(&connector->base,
166                         connector->dev->mode_config.dp_subconnector_property,
167                         subconnector);
168 }
169
170 /*
171  * initializes drm_device display related structures, based on the information
172  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
173  * drm_encoder, drm_mode_config
174  *
175  * Returns 0 on success
176  */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
180
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182                                 struct drm_plane *plane,
183                                 unsigned long possible_crtcs,
184                                 const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186                                struct drm_plane *plane,
187                                uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
190                                     uint32_t link_index,
191                                     struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193                                   struct amdgpu_encoder *aencoder,
194                                   uint32_t link_index);
195
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
197
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201                                   struct drm_atomic_state *state);
202
203 static void handle_cursor_update(struct drm_plane *plane,
204                                  struct drm_plane_state *old_plane_state);
205
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
214
215 static bool
216 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
217                                  struct drm_crtc_state *new_crtc_state);
218 /*
219  * dm_vblank_get_counter
220  *
221  * @brief
222  * Get counter for number of vertical blanks
223  *
224  * @param
225  * struct amdgpu_device *adev - [in] desired amdgpu device
226  * int crtc - [in] which CRTC to get the counter from
227  *
228  * @return
229  * Counter for vertical blanks
230  */
231 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
232 {
233         if (crtc >= adev->mode_info.num_crtc)
234                 return 0;
235         else {
236                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
237
238                 if (acrtc->dm_irq_params.stream == NULL) {
239                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
240                                   crtc);
241                         return 0;
242                 }
243
244                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
245         }
246 }
247
248 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
249                                   u32 *vbl, u32 *position)
250 {
251         uint32_t v_blank_start, v_blank_end, h_position, v_position;
252
253         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
254                 return -EINVAL;
255         else {
256                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
257
258                 if (acrtc->dm_irq_params.stream ==  NULL) {
259                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
260                                   crtc);
261                         return 0;
262                 }
263
264                 /*
265                  * TODO rework base driver to use values directly.
266                  * for now parse it back into reg-format
267                  */
268                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
269                                          &v_blank_start,
270                                          &v_blank_end,
271                                          &h_position,
272                                          &v_position);
273
274                 *position = v_position | (h_position << 16);
275                 *vbl = v_blank_start | (v_blank_end << 16);
276         }
277
278         return 0;
279 }
280
static bool dm_is_idle(void *handle)
{
	/* XXX todo: no real idle check implemented yet; always report idle. */
	return true;
}
286
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo: nothing to wait for yet; report success. */
	return 0;
}
292
static bool dm_check_soft_reset(void *handle)
{
	/* No soft-reset condition is ever reported for the DM block. */
	return false;
}
297
static int dm_soft_reset(void *handle)
{
	/* XXX todo: soft reset not implemented; report success. */
	return 0;
}
303
304 static struct amdgpu_crtc *
305 get_crtc_by_otg_inst(struct amdgpu_device *adev,
306                      int otg_inst)
307 {
308         struct drm_device *dev = adev_to_drm(adev);
309         struct drm_crtc *crtc;
310         struct amdgpu_crtc *amdgpu_crtc;
311
312         if (otg_inst == -1) {
313                 WARN_ON(1);
314                 return adev->mode_info.crtcs[0];
315         }
316
317         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
318                 amdgpu_crtc = to_amdgpu_crtc(crtc);
319
320                 if (amdgpu_crtc->otg_inst == otg_inst)
321                         return amdgpu_crtc;
322         }
323
324         return NULL;
325 }
326
327 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
328 {
329         return acrtc->dm_irq_params.freesync_config.state ==
330                        VRR_STATE_ACTIVE_VARIABLE ||
331                acrtc->dm_irq_params.freesync_config.state ==
332                        VRR_STATE_ACTIVE_FIXED;
333 }
334
335 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
336 {
337         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
338                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
339 }
340
341 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
342                                               struct dm_crtc_state *new_state)
343 {
344         if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
345                 return true;
346         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
347                 return true;
348         else
349                 return false;
350 }
351
352 /**
353  * dm_pflip_high_irq() - Handle pageflip interrupt
354  * @interrupt_params: used for determining the CRTC instance whose pageflip completed
355  *
356  * Handles the pageflip interrupt by notifying all interested parties
357  * that the pageflip has been completed.
358  */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        /* Derive the CRTC instance from the pageflip IRQ source. */
        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        /* event_lock protects pflip_status and the pending vblank event. */
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        /* A submitted flip is expected to carry an event; warn if not. */
        if (!e)
                WARN_ON(1);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}
456
/**
 * dm_vupdate_high_irq() - Handles the VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * In VRR mode, core vblank handling is deferred to this handler, which runs
 * after the end of the front-porch, so that vblank timestamping gives valid
 * results. It also performs BTR freesync processing for pre-DCE12 ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                /* event_lock protects the per-CRTC vrr_params. */
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}
501
502 /**
503  * dm_crtc_high_irq() - Handles CRTC interrupt
504  * @interrupt_params: used for determining the CRTC instance
505  *
506  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
507  * event handler.
508  */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /**
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /**
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        /* event_lock protects vrr_params, pflip_status and acrtc->event. */
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        /* Below-the-range (BTR) handling while VRR is actively variable. */
        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
582
583 #if defined(CONFIG_DRM_AMD_DC_DCN)
584 /**
585  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
586  * DCN generation ASICs
587  * @interrupt_params: interrupt parameters
588  *
589  * Used to set crc window/read out crc value at vertical line 0 position
590  */
591 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
592 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
593 {
594         struct common_irq_params *irq_params = interrupt_params;
595         struct amdgpu_device *adev = irq_params->adev;
596         struct amdgpu_crtc *acrtc;
597
598         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
599
600         if (!acrtc)
601                 return;
602
603         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
604 }
605 #endif
606 #endif
607
608 static int dm_set_clockgating_state(void *handle,
609                   enum amd_clockgating_state state)
610 {
611         return 0;
612 }
613
614 static int dm_set_powergating_state(void *handle,
615                   enum amd_powergating_state state)
616 {
617         return 0;
618 }
619
620 /* Prototypes of private functions */
621 static int dm_early_init(void* handle);
622
623 /* Allocate memory for FBC compressed data  */
624 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
625 {
626         struct drm_device *dev = connector->dev;
627         struct amdgpu_device *adev = drm_to_adev(dev);
628         struct dm_compressor_info *compressor = &adev->dm.compressor;
629         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
630         struct drm_display_mode *mode;
631         unsigned long max_size = 0;
632
633         if (adev->dm.dc->fbc_compressor == NULL)
634                 return;
635
636         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
637                 return;
638
639         if (compressor->bo_ptr)
640                 return;
641
642
643         list_for_each_entry(mode, &connector->modes, head) {
644                 if (max_size < mode->htotal * mode->vtotal)
645                         max_size = mode->htotal * mode->vtotal;
646         }
647
648         if (max_size) {
649                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
650                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
651                             &compressor->gpu_addr, &compressor->cpu_addr);
652
653                 if (r)
654                         DRM_ERROR("DM: Failed to initialize FBC\n");
655                 else {
656                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
657                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
658                 }
659
660         }
661
662 }
663
664 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
665                                           int pipe, bool *enabled,
666                                           unsigned char *buf, int max_bytes)
667 {
668         struct drm_device *dev = dev_get_drvdata(kdev);
669         struct amdgpu_device *adev = drm_to_adev(dev);
670         struct drm_connector *connector;
671         struct drm_connector_list_iter conn_iter;
672         struct amdgpu_dm_connector *aconnector;
673         int ret = 0;
674
675         *enabled = false;
676
677         mutex_lock(&adev->dm.audio_lock);
678
679         drm_connector_list_iter_begin(dev, &conn_iter);
680         drm_for_each_connector_iter(connector, &conn_iter) {
681                 aconnector = to_amdgpu_dm_connector(connector);
682                 if (aconnector->audio_inst != port)
683                         continue;
684
685                 *enabled = true;
686                 ret = drm_eld_size(connector->eld);
687                 memcpy(buf, connector->eld, min(max_bytes, ret));
688
689                 break;
690         }
691         drm_connector_list_iter_end(&conn_iter);
692
693         mutex_unlock(&adev->dm.audio_lock);
694
695         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
696
697         return ret;
698 }
699
/* Callbacks exposed through the DRM audio component interface. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};
703
704 static int amdgpu_dm_audio_component_bind(struct device *kdev,
705                                        struct device *hda_kdev, void *data)
706 {
707         struct drm_device *dev = dev_get_drvdata(kdev);
708         struct amdgpu_device *adev = drm_to_adev(dev);
709         struct drm_audio_component *acomp = data;
710
711         acomp->ops = &amdgpu_dm_audio_component_ops;
712         acomp->dev = kdev;
713         adev->dm.audio_component = acomp;
714
715         return 0;
716 }
717
718 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
719                                           struct device *hda_kdev, void *data)
720 {
721         struct drm_device *dev = dev_get_drvdata(kdev);
722         struct amdgpu_device *adev = drm_to_adev(dev);
723         struct drm_audio_component *acomp = data;
724
725         acomp->ops = NULL;
726         acomp->dev = NULL;
727         adev->dm.audio_component = NULL;
728 }
729
/* bind/unbind hooks registered with the component framework. */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};
734
735 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
736 {
737         int i, ret;
738
739         if (!amdgpu_audio)
740                 return 0;
741
742         adev->mode_info.audio.enabled = true;
743
744         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
745
746         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
747                 adev->mode_info.audio.pin[i].channels = -1;
748                 adev->mode_info.audio.pin[i].rate = -1;
749                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
750                 adev->mode_info.audio.pin[i].status_bits = 0;
751                 adev->mode_info.audio.pin[i].category_code = 0;
752                 adev->mode_info.audio.pin[i].connected = false;
753                 adev->mode_info.audio.pin[i].id =
754                         adev->dm.dc->res_pool->audios[i]->inst;
755                 adev->mode_info.audio.pin[i].offset = 0;
756         }
757
758         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
759         if (ret < 0)
760                 return ret;
761
762         adev->dm.audio_registered = true;
763
764         return 0;
765 }
766
767 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
768 {
769         if (!amdgpu_audio)
770                 return;
771
772         if (!adev->mode_info.audio.enabled)
773                 return;
774
775         if (adev->dm.audio_registered) {
776                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
777                 adev->dm.audio_registered = false;
778         }
779
780         /* TODO: Disable audio? */
781
782         adev->mode_info.audio.enabled = false;
783 }
784
785 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
786 {
787         struct drm_audio_component *acomp = adev->dm.audio_component;
788
789         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
790                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
791
792                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
793                                                  pin, -1);
794         }
795 }
796
/* dm_dmub_hw_init() - Copy the DMUB firmware and VBIOS into their
 * framebuffer windows and bring up the DMUB service hardware.
 *
 * Returns 0 on success (including ASICs with no DMUB support, which is
 * not an error), -EINVAL/-ENOMEM on failure.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        /* Instruction constants start after the ucode offset plus the PSP
         * signing header that is prepended to the image.
         */
        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        /* BSS/data follows immediately after the instruction constants. */
        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        /* The PSP header/footer bytes are not part of the payload copied
         * into the window, so subtract them from the reported size.
         */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
        fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        /* A timeout here is only logged, not treated as fatal. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        /* Expose the DMUB service to DC through its context. */
        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}
924
925 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* mmhub_read_system_context() - Fill @pa_config with the physical address
 * space layout (system aperture, AGP aperture and GART page table) that DC
 * uses to set up the display client's system context.
 */
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        /* System aperture bounds are tracked in 256KB (>> 18) units here and
         * scaled back up (<< 18) below.
         */
        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increase system aperture high address (add 1)
                 * to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        /* AGP aperture bounds are kept in 16MB (>> 24) units. */
        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;


        /* GART page table start/end are split into high/low parts of the
         * 4KB-page number (hence the >> 12); the base is the PD address.
         */
        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;

}
978 #endif
979 #if defined(CONFIG_DRM_AMD_DC_DCN)
980 static void event_mall_stutter(struct work_struct *work)
981 {
982
983         struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
984         struct amdgpu_display_manager *dm = vblank_work->dm;
985
986         mutex_lock(&dm->dc_lock);
987
988         if (vblank_work->enable)
989                 dm->active_vblank_irq_count++;
990         else
991                 dm->active_vblank_irq_count--;
992
993         dc_allow_idle_optimizations(
994                 dm->dc, dm->active_vblank_irq_count == 0);
995
996         DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
997
998
999         mutex_unlock(&dm->dc_lock);
1000 }
1001
1002 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1003 {
1004
1005         int max_caps = dc->caps.max_links;
1006         struct vblank_workqueue *vblank_work;
1007         int i = 0;
1008
1009         vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1010         if (ZERO_OR_NULL_PTR(vblank_work)) {
1011                 kfree(vblank_work);
1012                 return NULL;
1013         }
1014
1015         for (i = 0; i < max_caps; i++)
1016                 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1017
1018         return vblank_work;
1019 }
1020 #endif
/* amdgpu_dm_init() - Bring up the display manager: IRQs, Display Core (DC),
 * DMUB, freesync, optional HDCP/secure-display workqueues and the DRM KMS
 * device. On any failure jumps to the common error path, which unwinds via
 * amdgpu_dm_fini() and returns -EINVAL.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
#endif

        if(amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        /* Describe the ASIC to DC. */
        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        /* Per-ASIC DC feature flags. */
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        /* Apply user-requested feature-mask overrides. */
        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        /* Debug-mask overrides applied directly to the created DC. */
        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        /* DMUB must be up before dc_hardware_init(). */
        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* APUs need the physical address space config handed to DC. */
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                // Call the DC init_memory func
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        /* Freesync failure is logged but not fatal. */
        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* Vblank workqueue failure is logged but not fatal. */
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

                if (!adev->dm.vblank_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
        /* HDCP is only wired up for Raven and newer ASICs. */
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }


        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        /* Unwind whatever was constructed; amdgpu_dm_fini() must tolerate
         * partially-initialized state.
         */
        amdgpu_dm_fini(adev);

        return -EINVAL;
}
1214
1215 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1216 {
1217         int i;
1218
1219         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1220                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1221         }
1222
1223         amdgpu_dm_audio_fini(adev);
1224
1225         amdgpu_dm_destroy_drm_device(&adev->dm);
1226
1227 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1228         if (adev->dm.crc_rd_wrk) {
1229                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1230                 kfree(adev->dm.crc_rd_wrk);
1231                 adev->dm.crc_rd_wrk = NULL;
1232         }
1233 #endif
1234 #ifdef CONFIG_DRM_AMD_DC_HDCP
1235         if (adev->dm.hdcp_workqueue) {
1236                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1237                 adev->dm.hdcp_workqueue = NULL;
1238         }
1239
1240         if (adev->dm.dc)
1241                 dc_deinit_callbacks(adev->dm.dc);
1242 #endif
1243
1244 #if defined(CONFIG_DRM_AMD_DC_DCN)
1245         if (adev->dm.vblank_workqueue) {
1246                 adev->dm.vblank_workqueue->dm = NULL;
1247                 kfree(adev->dm.vblank_workqueue);
1248                 adev->dm.vblank_workqueue = NULL;
1249         }
1250 #endif
1251
1252         if (adev->dm.dc->ctx->dmub_srv) {
1253                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1254                 adev->dm.dc->ctx->dmub_srv = NULL;
1255         }
1256
1257         if (adev->dm.dmub_bo)
1258                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1259                                       &adev->dm.dmub_bo_gpu_addr,
1260                                       &adev->dm.dmub_bo_cpu_addr);
1261
1262         /* DC Destroy TODO: Replace destroy DAL */
1263         if (adev->dm.dc)
1264                 dc_destroy(&adev->dm.dc);
1265         /*
1266          * TODO: pageflip, vlank interrupt
1267          *
1268          * amdgpu_dm_irq_fini(adev);
1269          */
1270
1271         if (adev->dm.cgs_device) {
1272                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1273                 adev->dm.cgs_device = NULL;
1274         }
1275         if (adev->dm.freesync_module) {
1276                 mod_freesync_destroy(adev->dm.freesync_module);
1277                 adev->dm.freesync_module = NULL;
1278         }
1279
1280         mutex_destroy(&adev->dm.audio_lock);
1281         mutex_destroy(&adev->dm.dc_lock);
1282
1283         return;
1284 }
1285
1286 static int load_dmcu_fw(struct amdgpu_device *adev)
1287 {
1288         const char *fw_name_dmcu = NULL;
1289         int r;
1290         const struct dmcu_firmware_header_v1_0 *hdr;
1291
1292         switch(adev->asic_type) {
1293 #if defined(CONFIG_DRM_AMD_DC_SI)
1294         case CHIP_TAHITI:
1295         case CHIP_PITCAIRN:
1296         case CHIP_VERDE:
1297         case CHIP_OLAND:
1298 #endif
1299         case CHIP_BONAIRE:
1300         case CHIP_HAWAII:
1301         case CHIP_KAVERI:
1302         case CHIP_KABINI:
1303         case CHIP_MULLINS:
1304         case CHIP_TONGA:
1305         case CHIP_FIJI:
1306         case CHIP_CARRIZO:
1307         case CHIP_STONEY:
1308         case CHIP_POLARIS11:
1309         case CHIP_POLARIS10:
1310         case CHIP_POLARIS12:
1311         case CHIP_VEGAM:
1312         case CHIP_VEGA10:
1313         case CHIP_VEGA12:
1314         case CHIP_VEGA20:
1315         case CHIP_NAVI10:
1316         case CHIP_NAVI14:
1317         case CHIP_RENOIR:
1318         case CHIP_SIENNA_CICHLID:
1319         case CHIP_NAVY_FLOUNDER:
1320         case CHIP_DIMGREY_CAVEFISH:
1321         case CHIP_VANGOGH:
1322                 return 0;
1323         case CHIP_NAVI12:
1324                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1325                 break;
1326         case CHIP_RAVEN:
1327                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1328                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1329                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1330                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1331                 else
1332                         return 0;
1333                 break;
1334         default:
1335                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1336                 return -EINVAL;
1337         }
1338
1339         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1340                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1341                 return 0;
1342         }
1343
1344         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1345         if (r == -ENOENT) {
1346                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1347                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1348                 adev->dm.fw_dmcu = NULL;
1349                 return 0;
1350         }
1351         if (r) {
1352                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1353                         fw_name_dmcu);
1354                 return r;
1355         }
1356
1357         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1358         if (r) {
1359                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1360                         fw_name_dmcu);
1361                 release_firmware(adev->dm.fw_dmcu);
1362                 adev->dm.fw_dmcu = NULL;
1363                 return r;
1364         }
1365
1366         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1367         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1368         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1369         adev->firmware.fw_size +=
1370                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1371
1372         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1373         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1374         adev->firmware.fw_size +=
1375                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1376
1377         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1378
1379         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1380
1381         return 0;
1382 }
1383
1384 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1385 {
1386         struct amdgpu_device *adev = ctx;
1387
1388         return dm_read_reg(adev->dm.dc->ctx, address);
1389 }
1390
1391 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1392                                      uint32_t value)
1393 {
1394         struct amdgpu_device *adev = ctx;
1395
1396         return dm_write_reg(adev->dm.dc->ctx, address, value);
1397 }
1398
/* dm_dmub_sw_init() - Software-side DMUB setup: request/validate firmware,
 * create the DMUB service, size its memory regions, allocate the backing
 * framebuffer BO and compute the per-window framebuffer info.
 *
 * Returns 0 on success and also when the ASIC has no DMUB or the firmware
 * cannot be loaded/validated (the driver continues without DMUB); negative
 * errno on allocation or service-creation failure.
 */
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        /* Select the DMUB variant and firmware file for this ASIC. */
        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
                break;
        case CHIP_SIENNA_CICHLID:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
                break;
        case CHIP_NAVY_FLOUNDER:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
                break;
        case CHIP_VANGOGH:
                dmub_asic = DMUB_ASIC_DCN301;
                fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
                break;
        case CHIP_DIMGREY_CAVEFISH:
                dmub_asic = DMUB_ASIC_DCN302;
                fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
                break;

        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        /* Firmware failures are logged and treated as "no DMUB" (return 0),
         * not as fatal errors.
         */
        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        /* With PSP loading, register the firmware so PSP uploads it. */
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }

        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;

        if (!dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        /* Create the DMUB service. */
        status = dmub_srv_create(dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        /* Payload sizes exclude the PSP signing header/footer. */
        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->bios_size;
        region_params.fw_bss_data = region_params.bss_data_size ?
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes) : NULL;
        region_params.fw_inst_const =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
        fb_info = adev->dm.dmub_fb_info;

        if (!fb_info) {
                DRM_ERROR(
                        "Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        return 0;
}
1550
/* IP-block sw_init hook: set up the DMUB service software state, then
 * request the DMCU firmware.
 */
static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = handle;
        int ret;

        ret = dm_dmub_sw_init(adev);
        if (ret)
                return ret;

        return load_dmcu_fw(adev);
}
1562
1563 static int dm_sw_fini(void *handle)
1564 {
1565         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1566
1567         kfree(adev->dm.dmub_fb_info);
1568         adev->dm.dmub_fb_info = NULL;
1569
1570         if (adev->dm.dmub_srv) {
1571                 dmub_srv_destroy(adev->dm.dmub_srv);
1572                 adev->dm.dmub_srv = NULL;
1573         }
1574
1575         release_firmware(adev->dm.dmub_fw);
1576         adev->dm.dmub_fw = NULL;
1577
1578         release_firmware(adev->dm.fw_dmcu);
1579         adev->dm.fw_dmcu = NULL;
1580
1581         return 0;
1582 }
1583
/*
 * detect_mst_link_for_all_connectors() - (Re)start MST topology management.
 * @dev: DRM device whose connector list is scanned.
 *
 * For every connector whose DC link was detected as an MST branch and has
 * a usable AUX channel, enable the DRM MST topology manager so sideband
 * messaging is running before hotplug handling depends on it.
 *
 * Return: 0 on success, or the first negative error from
 * drm_dp_mst_topology_mgr_set_mst(); on failure the link is downgraded to
 * dc_connection_single and the scan stops early.
 */
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	/* iter_end is safe even when the loop was exited with break. */
	drm_connector_list_iter_end(&iter);

	return ret;
}
1613
/*
 * dm_late_init() - Late init hook: program ABM backlight data, start MST.
 * @handle: struct amdgpu_device pointer cast to void *.
 *
 * Builds a linear 16-entry backlight LUT plus ramping parameters and
 * loads them into whichever engine implements ABM: DMCU iRAM when a DMCU
 * exists, otherwise (ABM 2.4+) the DMUB firmware.  Finally kicks off MST
 * topology detection on all connectors.
 *
 * Return: 0 on success, -EINVAL if the ABM config load fails, or the
 * error from detect_mst_link_for_all_connectors().
 */
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	/* Identity LUT: 16 evenly spaced points over the 16-bit range. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	/* NOTE(review): params is initialized field-by-field, not zeroed;
	 * confirm dmcu_iram_parameters has no other members read downstream.
	 */
	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction,  Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
1654
/*
 * s3_handle_mst() - Suspend or resume MST topology managers around S3.
 * @dev:     DRM device whose connectors are walked.
 * @suspend: true entering S3, false leaving it.
 *
 * Only root MST branch connectors are handled; ports created by the MST
 * framework itself (aconnector->mst_port set) are skipped.  On resume, a
 * manager that fails to come back (e.g. the hub vanished while asleep) is
 * torn down, and a single hotplug event is sent afterwards so userspace
 * rescans the outputs.
 */
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
1688
/*
 * amdgpu_dm_smu_write_watermarks_table() - Push DCN watermarks to the SMU.
 * @adev: amdgpu device.
 *
 * Only relevant for SW-SMU Navi10/12/14 parts, where the watermark clock
 * ranges are fixed and must be re-sent at boot and after S3 resume; every
 * other ASIC returns 0 immediately.  See the flow description below.
 *
 * Return: 0 on success or when not applicable, else the SMU error code.
 */
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
	 * on window driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculate dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then call pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented different flow for window driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function apply to navi10/12/14 but not Renoir
	 * *
	 */
	switch(adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}
1744
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 *
 * Return: always 0.
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	/* NOTE(review): amdgpu_dm_init()'s status is not checked, so a
	 * failed DM bring-up is silently ignored here — confirm intended.
	 */
	amdgpu_dm_hpd_init(adev);

	return 0;
}
1774
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 *
 * Return: always 0.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Mirror of dm_hw_init(): HPD first, then IRQs, then the DM core. */
	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
1793
1794
1795 static int dm_enable_vblank(struct drm_crtc *crtc);
1796 static void dm_disable_vblank(struct drm_crtc *crtc);
1797
1798 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1799                                  struct dc_state *state, bool enable)
1800 {
1801         enum dc_irq_source irq_source;
1802         struct amdgpu_crtc *acrtc;
1803         int rc = -EBUSY;
1804         int i = 0;
1805
1806         for (i = 0; i < state->stream_count; i++) {
1807                 acrtc = get_crtc_by_otg_inst(
1808                                 adev, state->stream_status[i].primary_otg_inst);
1809
1810                 if (acrtc && state->stream_status[i].plane_count != 0) {
1811                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1812                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1813                         DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1814                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1815                         if (rc)
1816                                 DRM_WARN("Failed to %s pflip interrupts\n",
1817                                          enable ? "enable" : "disable");
1818
1819                         if (enable) {
1820                                 rc = dm_enable_vblank(&acrtc->base);
1821                                 if (rc)
1822                                         DRM_WARN("Failed to enable vblank interrupts\n");
1823                         } else {
1824                                 dm_disable_vblank(&acrtc->base);
1825                         }
1826
1827                 }
1828         }
1829
1830 }
1831
/*
 * amdgpu_dm_commit_zero_streams() - Commit a DC state with no streams.
 * @dc: display core instance.
 *
 * Copies the current context, strips every stream (and its planes) from
 * the copy, validates the result and commits it — effectively blanking
 * the display pipes.  Used on the GPU-reset suspend path.
 *
 * Return: DC_OK on success; DC_FAIL_DETACH_SURFACES or another dc_status
 * error on failure, or DC_ERROR_UNEXPECTED if the state copy could not
 * be allocated.
 */
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}


	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
1883
/*
 * dm_suspend() - Suspend hook for the DM IP block.
 * @handle: struct amdgpu_device pointer cast to void *.
 *
 * Two distinct paths:
 *  - GPU reset (amdgpu_in_reset()): cache the live DC state, disable the
 *    per-stream interrupts, commit zero streams and suspend DM IRQs.
 *    dc_lock is deliberately left held on return; dm_resume() releases
 *    it after the cached state is restored.
 *  - Regular S3: cache the DRM atomic state, suspend MST managers and
 *    IRQs, and put DC into ACPI D3.
 *
 * Return: 0 (ret is currently never set to anything else).
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		/* Held across the reset; released in dm_resume(). */
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
	amdgpu_dm_crtc_secure_display_suspend(adev);
#endif
	/* A non-NULL cached_state here means an unbalanced suspend/resume. */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);


	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
1923
1924 static struct amdgpu_dm_connector *
1925 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1926                                              struct drm_crtc *crtc)
1927 {
1928         uint32_t i;
1929         struct drm_connector_state *new_con_state;
1930         struct drm_connector *connector;
1931         struct drm_crtc *crtc_from_state;
1932
1933         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1934                 crtc_from_state = new_con_state->crtc;
1935
1936                 if (crtc_from_state == crtc)
1937                         return to_amdgpu_dm_connector(connector);
1938         }
1939
1940         return NULL;
1941 }
1942
/*
 * emulated_link_detect() - Fabricate a sink for a forced connector.
 * @link: DC link for which real detection reported no sink.
 *
 * Used when userspace forces a connector on but physical detection came
 * back dc_connection_none: marks the link disconnected, builds an
 * emulated sink whose signal matches the connector type, and attempts to
 * read a local (override) EDID into it.  Errors are logged only; the
 * caller proceeds regardless.
 */
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	/* Drop our reference to the previous sink, if any. */
	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		/* Emulated DP sinks are reported to DC as virtual —
		 * presumably so no real link training is attempted.
		 */
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
2024
2025 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2026                                      struct amdgpu_display_manager *dm)
2027 {
2028         struct {
2029                 struct dc_surface_update surface_updates[MAX_SURFACES];
2030                 struct dc_plane_info plane_infos[MAX_SURFACES];
2031                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2032                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2033                 struct dc_stream_update stream_update;
2034         } * bundle;
2035         int k, m;
2036
2037         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2038
2039         if (!bundle) {
2040                 dm_error("Failed to allocate update bundle\n");
2041                 goto cleanup;
2042         }
2043
2044         for (k = 0; k < dc_state->stream_count; k++) {
2045                 bundle->stream_update.stream = dc_state->streams[k];
2046
2047                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2048                         bundle->surface_updates[m].surface =
2049                                 dc_state->stream_status->plane_states[m];
2050                         bundle->surface_updates[m].surface->force_full_update =
2051                                 true;
2052                 }
2053                 dc_commit_updates_for_stream(
2054                         dm->dc, bundle->surface_updates,
2055                         dc_state->stream_status->plane_count,
2056                         dc_state->streams[k], &bundle->stream_update, dc_state);
2057         }
2058
2059 cleanup:
2060         kfree(bundle);
2061
2062         return;
2063 }
2064
/*
 * dm_set_dpms_off() - Blank the stream currently driven by @link.
 * @link: DC link whose active stream should get dpms_off applied.
 *
 * Under dc_lock, looks up the stream bound to the link and commits a
 * stream update with dpms_off = true.  Quietly returns when the link has
 * no active stream (e.g. it was already torn down).
 */
static void dm_set_dpms_off(struct dc_link *link)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}
2091
2092 static int dm_resume(void *handle)
2093 {
2094         struct amdgpu_device *adev = handle;
2095         struct drm_device *ddev = adev_to_drm(adev);
2096         struct amdgpu_display_manager *dm = &adev->dm;
2097         struct amdgpu_dm_connector *aconnector;
2098         struct drm_connector *connector;
2099         struct drm_connector_list_iter iter;
2100         struct drm_crtc *crtc;
2101         struct drm_crtc_state *new_crtc_state;
2102         struct dm_crtc_state *dm_new_crtc_state;
2103         struct drm_plane *plane;
2104         struct drm_plane_state *new_plane_state;
2105         struct dm_plane_state *dm_new_plane_state;
2106         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2107         enum dc_connection_type new_connection_type = dc_connection_none;
2108         struct dc_state *dc_state;
2109         int i, r, j;
2110
2111         if (amdgpu_in_reset(adev)) {
2112                 dc_state = dm->cached_dc_state;
2113
2114                 r = dm_dmub_hw_init(adev);
2115                 if (r)
2116                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2117
2118                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2119                 dc_resume(dm->dc);
2120
2121                 amdgpu_dm_irq_resume_early(adev);
2122
2123                 for (i = 0; i < dc_state->stream_count; i++) {
2124                         dc_state->streams[i]->mode_changed = true;
2125                         for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2126                                 dc_state->stream_status->plane_states[j]->update_flags.raw
2127                                         = 0xffffffff;
2128                         }
2129                 }
2130
2131                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2132
2133                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2134
2135                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2136
2137                 dc_release_state(dm->cached_dc_state);
2138                 dm->cached_dc_state = NULL;
2139
2140                 amdgpu_dm_irq_resume_late(adev);
2141
2142                 mutex_unlock(&dm->dc_lock);
2143
2144                 return 0;
2145         }
2146         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2147         dc_release_state(dm_state->context);
2148         dm_state->context = dc_create_state(dm->dc);
2149         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2150         dc_resource_state_construct(dm->dc, dm_state->context);
2151
2152         /* Before powering on DC we need to re-initialize DMUB. */
2153         r = dm_dmub_hw_init(adev);
2154         if (r)
2155                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156
2157         /* power on hardware */
2158         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2159
2160         /* program HPD filter */
2161         dc_resume(dm->dc);
2162
2163         /*
2164          * early enable HPD Rx IRQ, should be done before set mode as short
2165          * pulse interrupts are used for MST
2166          */
2167         amdgpu_dm_irq_resume_early(adev);
2168
2169         /* On resume we need to rewrite the MSTM control bits to enable MST*/
2170         s3_handle_mst(ddev, false);
2171
2172         /* Do detection*/
2173         drm_connector_list_iter_begin(ddev, &iter);
2174         drm_for_each_connector_iter(connector, &iter) {
2175                 aconnector = to_amdgpu_dm_connector(connector);
2176
2177                 /*
2178                  * this is the case when traversing through already created
2179                  * MST connectors, should be skipped
2180                  */
2181                 if (aconnector->mst_port)
2182                         continue;
2183
2184                 mutex_lock(&aconnector->hpd_lock);
2185                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2186                         DRM_ERROR("KMS: Failed to detect connector\n");
2187
2188                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2189                         emulated_link_detect(aconnector->dc_link);
2190                 else
2191                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2192
2193                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2194                         aconnector->fake_enable = false;
2195
2196                 if (aconnector->dc_sink)
2197                         dc_sink_release(aconnector->dc_sink);
2198                 aconnector->dc_sink = NULL;
2199                 amdgpu_dm_update_connector_after_detect(aconnector);
2200                 mutex_unlock(&aconnector->hpd_lock);
2201         }
2202         drm_connector_list_iter_end(&iter);
2203
2204         /* Force mode set in atomic commit */
2205         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2206                 new_crtc_state->active_changed = true;
2207
2208         /*
2209          * atomic_check is expected to create the dc states. We need to release
2210          * them here, since they were duplicated as part of the suspend
2211          * procedure.
2212          */
2213         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2214                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2215                 if (dm_new_crtc_state->stream) {
2216                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2217                         dc_stream_release(dm_new_crtc_state->stream);
2218                         dm_new_crtc_state->stream = NULL;
2219                 }
2220         }
2221
2222         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2223                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2224                 if (dm_new_plane_state->dc_state) {
2225                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2226                         dc_plane_state_release(dm_new_plane_state->dc_state);
2227                         dm_new_plane_state->dc_state = NULL;
2228                 }
2229         }
2230
2231         drm_atomic_helper_resume(ddev, dm->cached_state);
2232
2233         dm->cached_state = NULL;
2234
2235 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2236         amdgpu_dm_crtc_secure_display_resume(adev);
2237 #endif
2238
2239         amdgpu_dm_irq_resume_late(adev);
2240
2241         amdgpu_dm_smu_write_watermarks_table(adev);
2242
2243         return 0;
2244 }
2245
2246 /**
2247  * DOC: DM Lifecycle
2248  *
2249  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2250  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2251  * the base driver's device list to be initialized and torn down accordingly.
2252  *
2253  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2254  */
2255
/* IP-block callbacks wiring DM into the amdgpu base driver lifecycle. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
2273
/* IP block descriptor registered by the amdgpu base driver (DCE, v1.0). */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
2282
2283
2284 /**
2285  * DOC: atomic
2286  *
2287  * *WIP*
2288  */
2289
/* DRM mode-config callbacks; DM supplies the atomic check, the DRM
 * helpers drive the commit.
 */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
2297
/* The commit tail runs in DM so DC state is programmed by the driver. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
2301
2302 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2303 {
2304         u32 max_cll, min_cll, max, min, q, r;
2305         struct amdgpu_dm_backlight_caps *caps;
2306         struct amdgpu_display_manager *dm;
2307         struct drm_connector *conn_base;
2308         struct amdgpu_device *adev;
2309         struct dc_link *link = NULL;
2310         static const u8 pre_computed_values[] = {
2311                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2312                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2313
2314         if (!aconnector || !aconnector->dc_link)
2315                 return;
2316
2317         link = aconnector->dc_link;
2318         if (link->connector_signal != SIGNAL_TYPE_EDP)
2319                 return;
2320
2321         conn_base = &aconnector->base;
2322         adev = drm_to_adev(conn_base->dev);
2323         dm = &adev->dm;
2324         caps = &dm->backlight_caps;
2325         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2326         caps->aux_support = false;
2327         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2328         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2329
2330         if (caps->ext_caps->bits.oled == 1 ||
2331             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2332             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2333                 caps->aux_support = true;
2334
2335         if (amdgpu_backlight == 0)
2336                 caps->aux_support = false;
2337         else if (amdgpu_backlight == 1)
2338                 caps->aux_support = true;
2339
2340         /* From the specification (CTA-861-G), for calculating the maximum
2341          * luminance we need to use:
2342          *      Luminance = 50*2**(CV/32)
2343          * Where CV is a one-byte value.
2344          * For calculating this expression we may need float point precision;
2345          * to avoid this complexity level, we take advantage that CV is divided
2346          * by a constant. From the Euclids division algorithm, we know that CV
2347          * can be written as: CV = 32*q + r. Next, we replace CV in the
2348          * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2349          * need to pre-compute the value of r/32. For pre-computing the values
2350          * We just used the following Ruby line:
2351          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2352          * The results of the above expressions can be verified at
2353          * pre_computed_values.
2354          */
2355         q = max_cll >> 5;
2356         r = max_cll % 32;
2357         max = (1 << q) * pre_computed_values[r];
2358
2359         // min luminance: maxLum * (CV/255)^2 / 100
2360         q = DIV_ROUND_CLOSEST(min_cll, 255);
2361         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2362
2363         caps->aux_max_input_signal = max;
2364         caps->aux_min_input_signal = min;
2365 }
2366
/*
 * amdgpu_dm_update_connector_after_detect - sync the DRM connector with the
 * sink DC reported after a (re)detection.
 * @aconnector: the DM connector whose dc_link was just (re)detected
 *
 * Takes ownership decisions for aconnector->dc_sink based on the link's
 * current local_sink: adopts a newly attached sink (updating EDID, CEC and
 * freesync caps), falls back to the emulated sink for forced/headless
 * connectors, or tears connector state down on disconnect.  MST sinks are
 * ignored entirely - the DRM MST framework manages those.
 *
 * Careful refcounting: the local temporary 'sink' is retained on entry and
 * released on every exit path, so references held in aconnector->dc_sink
 * stay balanced no matter which branch runs.
 */
void amdgpu_dm_update_connector_after_detect(
                struct amdgpu_dm_connector *aconnector)
{
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        struct dc_sink *sink;

        /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state == true)
                return;

        /* Hold a temporary reference so the sink can't vanish under us. */
        sink = aconnector->dc_link->local_sink;
        if (sink)
                dc_sink_retain(sink);

        /*
         * Edid mgmt connector gets first update only in mode_valid hook and then
         * the connector sink is set to either fake or physical sink depends on link status.
         * Skip if already done during boot.
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
                        && aconnector->dc_em_sink) {

                /*
                 * For S3 resume with headless use eml_sink to fake stream
                 * because on resume connector->sink is set to NULL
                 */
                mutex_lock(&dev->mode_config.mutex);

                if (sink) {
                        if (aconnector->dc_sink) {
                                amdgpu_dm_update_freesync_caps(connector, NULL);
                                /*
                                 * retain and release below are used to
                                 * bump up refcount for sink because the link doesn't point
                                 * to it anymore after disconnect, so on next crtc to connector
                                 * reshuffle by UMD we will get into unwanted dc_sink release
                                 */
                                dc_sink_release(aconnector->dc_sink);
                        }
                        aconnector->dc_sink = sink;
                        dc_sink_retain(aconnector->dc_sink);
                        amdgpu_dm_update_freesync_caps(connector,
                                        aconnector->edid);
                } else {
                        /* No physical sink: fall back to the emulated one. */
                        amdgpu_dm_update_freesync_caps(connector, NULL);
                        if (!aconnector->dc_sink) {
                                aconnector->dc_sink = aconnector->dc_em_sink;
                                dc_sink_retain(aconnector->dc_sink);
                        }
                }

                mutex_unlock(&dev->mode_config.mutex);

                if (sink)
                        dc_sink_release(sink);
                return;
        }

        /*
         * TODO: temporary guard to look for proper fix
         * if this sink is MST sink, we should not do anything
         */
        if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
                dc_sink_release(sink);
                return;
        }

        if (aconnector->dc_sink == sink) {
                /*
                 * We got a DP short pulse (Link Loss, DP CTS, etc...).
                 * Do nothing!!
                 */
                DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
                                aconnector->connector_id);
                if (sink)
                        dc_sink_release(sink);
                return;
        }

        DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
                aconnector->connector_id, aconnector->dc_sink, sink);

        mutex_lock(&dev->mode_config.mutex);

        /*
         * 1. Update status of the drm connector
         * 2. Send an event and let userspace tell us what to do
         */
        if (sink) {
                /*
                 * TODO: check if we still need the S3 mode update workaround.
                 * If yes, put it here.
                 */
                if (aconnector->dc_sink) {
                        amdgpu_dm_update_freesync_caps(connector, NULL);
                        dc_sink_release(aconnector->dc_sink);
                }

                aconnector->dc_sink = sink;
                dc_sink_retain(aconnector->dc_sink);
                if (sink->dc_edid.length == 0) {
                        /* Sink came up without an EDID: clear cached EDID/CEC. */
                        aconnector->edid = NULL;
                        if (aconnector->dc_link->aux_mode) {
                                drm_dp_cec_unset_edid(
                                        &aconnector->dm_dp_aux.aux);
                        }
                } else {
                        aconnector->edid =
                                (struct edid *)sink->dc_edid.raw_edid;

                        drm_connector_update_edid_property(connector,
                                                           aconnector->edid);
                        if (aconnector->dc_link->aux_mode)
                                drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
                                                    aconnector->edid);
                }

                amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
                update_connector_ext_caps(aconnector);
        } else {
                /* Disconnect: drop EDID, freesync caps, modes and our sink ref. */
                drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
                amdgpu_dm_update_freesync_caps(connector, NULL);
                drm_connector_update_edid_property(connector, NULL);
                aconnector->num_modes = 0;
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
                /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
                if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
                        connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
        }

        mutex_unlock(&dev->mode_config.mutex);

        update_subconnector_property(aconnector);

        /* Balance the retain taken at the top of this function. */
        if (sink)
                dc_sink_release(sink);
}
2509
/*
 * handle_hpd_irq - deferred handler for a long-pulse HPD interrupt
 * (connect/disconnect).
 * @param: the struct amdgpu_dm_connector the HPD fired for, passed as
 *         an opaque pointer by the DM IRQ dispatch layer.
 *
 * Re-runs link detection under hpd_lock, updates the connector state and,
 * for non-forced connectors, sends a hotplug uevent to userspace.  For
 * forced connectors with nothing physically attached, an emulated link
 * detect keeps the forced state alive.
 */
static void handle_hpd_irq(void *param)
{
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif

        /*
         * In case of failure or MST no need to update connector status or notify the OS
         * since (for MST case) MST does this in its own context.
         */
        mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        /* Any hotplug invalidates the current HDCP session on this link. */
        if (adev->dm.hdcp_workqueue) {
                hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
                dm_con_state->update_hdcp = true;
        }
#endif
        if (aconnector->fake_enable)
                aconnector->fake_enable = false;

        if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");

        if (aconnector->base.force && new_connection_type == dc_connection_none) {
                /* Forced connector with nothing attached: emulate the link. */
                emulated_link_detect(aconnector->dc_link);


                drm_modeset_lock_all(dev);
                dm_restore_drm_connector_state(dev, connector);
                drm_modeset_unlock_all(dev);

                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                        drm_kms_helper_hotplug_event(dev);

        } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
                /* Real state change: power down DPMS if fully disconnected. */
                if (new_connection_type == dc_connection_none &&
                    aconnector->dc_link->type == dc_connection_none)
                        dm_set_dpms_off(aconnector->dc_link);

                amdgpu_dm_update_connector_after_detect(aconnector);

                drm_modeset_lock_all(dev);
                dm_restore_drm_connector_state(dev, connector);
                drm_modeset_unlock_all(dev);

                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                        drm_kms_helper_hotplug_event(dev);
        }
        mutex_unlock(&aconnector->hpd_lock);

}
2567
/*
 * dm_handle_hpd_rx_irq - service DP sink event/status (ESI) notifications
 * behind a short-pulse HPD, primarily MST up/down messages.
 * @aconnector: DP connector whose AUX channel raised the short pulse
 *
 * Reads the DPCD IRQ vector (legacy 0x200 block for pre-1.2 sinks, ESI
 * block at 0x2002 for DP 1.2+), hands it to the DRM MST helper, ACKs the
 * serviced bits back to the sink, and re-reads in a loop until either no
 * new IRQ is pending or max_process_count iterations have run (guards
 * against a sink that never deasserts).
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
        uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
        uint8_t dret;
        bool new_irq_handled = false;
        int dpcd_addr;
        int dpcd_bytes_to_read;

        const int max_process_count = 30;
        int process_count = 0;

        const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

        if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
                dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
                /* DPCD 0x200 - 0x201 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT;
        } else {
                dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
                /* DPCD 0x2002 - 0x2005 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT_ESI;
        }

        dret = drm_dp_dpcd_read(
                &aconnector->dm_dp_aux.aux,
                dpcd_addr,
                esi,
                dpcd_bytes_to_read);

        /* Keep draining until the sink reports no further pending IRQs. */
        while (dret == dpcd_bytes_to_read &&
                process_count < max_process_count) {
                uint8_t retry;
                dret = 0;

                process_count++;

                DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                /* handle HPD short pulse irq */
                if (aconnector->mst_mgr.mst_state)
                        drm_dp_mst_hpd_irq(
                                &aconnector->mst_mgr,
                                esi,
                                &new_irq_handled);

                if (new_irq_handled) {
                        /* ACK at DPCD to notify down stream */
                        const int ack_dpcd_bytes_to_write =
                                dpcd_bytes_to_read - 1;

                        /* AUX writes can fail transiently; retry up to 3x. */
                        for (retry = 0; retry < 3; retry++) {
                                uint8_t wret;

                                wret = drm_dp_dpcd_write(
                                        &aconnector->dm_dp_aux.aux,
                                        dpcd_addr + 1,
                                        &esi[1],
                                        ack_dpcd_bytes_to_write);
                                if (wret == ack_dpcd_bytes_to_write)
                                        break;
                        }

                        /* check if there is new irq to be handled */
                        dret = drm_dp_dpcd_read(
                                &aconnector->dm_dp_aux.aux,
                                dpcd_addr,
                                esi,
                                dpcd_bytes_to_read);

                        new_irq_handled = false;
                } else {
                        break;
                }
        }

        if (process_count == max_process_count)
                DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
2645
/*
 * handle_hpd_rx_irq - deferred handler for a DP short-pulse HPD interrupt.
 * @param: the struct amdgpu_dm_connector the short pulse fired for, passed
 *         as an opaque pointer by the DM IRQ dispatch layer.
 *
 * Short pulses signal sink-side events: MST message-ready notifications
 * (dispatched to dm_handle_hpd_rx_irq), link loss / downstream port status
 * changes (re-run link detection and notify userspace), and HDCP CP_IRQ.
 * hpd_lock is taken only for non-MST links; MST branch devices are
 * serialized by the MST framework itself.
 */
static void handle_hpd_rx_irq(void *param)
{
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
        bool result = false;
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct amdgpu_device *adev = drm_to_adev(dev);
        union hpd_irq_data hpd_irq_data;

        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

        /*
         * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
         * conflict, after implement i2c helper, this mutex should be
         * retired.
         */
        if (dc_link->type != dc_connection_mst_branch)
                mutex_lock(&aconnector->hpd_lock);

        read_hpd_rx_irq_data(dc_link, &hpd_irq_data);

        if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
                (dc_link->type == dc_connection_mst_branch)) {
                /* MST message-ready bits: let the ESI loop service them and
                 * skip the generic DC handling below.
                 */
                if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
                        result = true;
                        dm_handle_hpd_rx_irq(aconnector);
                        goto out;
                } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
                        result = false;
                        dm_handle_hpd_rx_irq(aconnector);
                        goto out;
                }
        }

        mutex_lock(&adev->dm.dc_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
        result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
        result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
        mutex_unlock(&adev->dm.dc_lock);

out:
        if (result && !is_mst_root_connector) {
                /* Downstream Port status changed. */
                if (!dc_link_detect_sink(dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                if (aconnector->base.force && new_connection_type == dc_connection_none) {
                        emulated_link_detect(dc_link);

                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;

                        amdgpu_dm_update_connector_after_detect(aconnector);


                        drm_modeset_lock_all(dev);
                        dm_restore_drm_connector_state(dev, connector);
                        drm_modeset_unlock_all(dev);

                        drm_kms_helper_hotplug_event(dev);
                } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;

                        amdgpu_dm_update_connector_after_detect(aconnector);


                        drm_modeset_lock_all(dev);
                        dm_restore_drm_connector_state(dev, connector);
                        drm_modeset_unlock_all(dev);

                        drm_kms_helper_hotplug_event(dev);
                }
        }
#ifdef CONFIG_DRM_AMD_DC_HDCP
        /* Content-protection IRQ from the sink: forward to the HDCP worker. */
        if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
                if (adev->dm.hdcp_workqueue)
                        hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
        }
#endif

        if (dc_link->type != dc_connection_mst_branch) {
                drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
                mutex_unlock(&aconnector->hpd_lock);
        }
}
2738
2739 static void register_hpd_handlers(struct amdgpu_device *adev)
2740 {
2741         struct drm_device *dev = adev_to_drm(adev);
2742         struct drm_connector *connector;
2743         struct amdgpu_dm_connector *aconnector;
2744         const struct dc_link *dc_link;
2745         struct dc_interrupt_params int_params = {0};
2746
2747         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2748         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2749
2750         list_for_each_entry(connector,
2751                         &dev->mode_config.connector_list, head) {
2752
2753                 aconnector = to_amdgpu_dm_connector(connector);
2754                 dc_link = aconnector->dc_link;
2755
2756                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2757                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2758                         int_params.irq_source = dc_link->irq_source_hpd;
2759
2760                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2761                                         handle_hpd_irq,
2762                                         (void *) aconnector);
2763                 }
2764
2765                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2766
2767                         /* Also register for DP short pulse (hpd_rx). */
2768                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2769                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2770
2771                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2772                                         handle_hpd_rx_irq,
2773                                         (void *) aconnector);
2774                 }
2775         }
2776 }
2777
#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
/*
 * dce60_register_irq_handlers - DCE 6.x (SI) variant: register vblank,
 * pageflip and HPD interrupt sources with the base driver and install the
 * DM high-IRQ callbacks for each.  Returns 0 or a negative errno from
 * amdgpu_irq_add_id().
 */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
        unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling. */

        /* Use VBLANK interrupt */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                /* NOTE(review): src id is i+1 - SI vblank sources appear to
                 * be 1-based; confirm against the SI interrupt source list.
                 */
                r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i+1 , 0);

                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_crtc_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
                        i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, client_id,
                        VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
#endif
2860
/* Register IRQ sources and initialize IRQ callbacks */
/*
 * dce110_register_irq_handlers - DCE 8.x-12.x variant: register vblank,
 * vupdate, pageflip and HPD interrupt sources and install the DM high-IRQ
 * callbacks.  Vega10+ parts route DCE interrupts through the SOC15 IH
 * client instead of the legacy client.  Returns 0 or a negative errno.
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
        unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

        if (adev->asic_type >= CHIP_VEGA10)
                client_id = SOC15_IH_CLIENTID_DCE;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling. */

        /* Use VBLANK interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_crtc_high_irq, c_irq_params);
        }

        /* Use VUPDATE interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
                if (r) {
                        DRM_ERROR("Failed to add vupdate irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_vupdate_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
                        i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, client_id,
                        VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
2965
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
/*
 * dcn10_register_irq_handlers - DCN variant: register vstartup (used as
 * the vblank source on DCN), optional OTG vertical-line-0 (secure display),
 * vupdate-no-lock, pageflip and HPD interrupt sources, and install the DM
 * high-IRQ callbacks.  Returns 0 or a negative errno.
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling.
         */

        /* Use VSTARTUP interrupt */
        for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
                        i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(
                        adev, &int_params, dm_crtc_high_irq, c_irq_params);
        }

        /* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        for (i = DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL;
                        i <= DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL
                                        + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vline0_irq);

                if (r) {
                        DRM_ERROR("Failed to add vline0 irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.vline0_params[int_params.irq_source
                                        - DC_IRQ_SOURCE_DC1_VLINE0];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
        }
#endif

        /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
         * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
         * to trigger at end of each vblank, regardless of state of the lock,
         * matching DCE behaviour.
         */
        for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
             i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
             i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

                if (r) {
                        DRM_ERROR("Failed to add vupdate irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_vupdate_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
                        i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
                        &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
#endif
3108
3109 /*
3110  * Acquires the lock for the atomic state object and returns
3111  * the new atomic state.
3112  *
3113  * This should only be called during atomic check.
3114  */
3115 static int dm_atomic_get_state(struct drm_atomic_state *state,
3116                                struct dm_atomic_state **dm_state)
3117 {
3118         struct drm_device *dev = state->dev;
3119         struct amdgpu_device *adev = drm_to_adev(dev);
3120         struct amdgpu_display_manager *dm = &adev->dm;
3121         struct drm_private_state *priv_state;
3122
3123         if (*dm_state)
3124                 return 0;
3125
3126         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3127         if (IS_ERR(priv_state))
3128                 return PTR_ERR(priv_state);
3129
3130         *dm_state = to_dm_atomic_state(priv_state);
3131
3132         return 0;
3133 }
3134
3135 static struct dm_atomic_state *
3136 dm_atomic_get_new_state(struct drm_atomic_state *state)
3137 {
3138         struct drm_device *dev = state->dev;
3139         struct amdgpu_device *adev = drm_to_adev(dev);
3140         struct amdgpu_display_manager *dm = &adev->dm;
3141         struct drm_private_obj *obj;
3142         struct drm_private_state *new_obj_state;
3143         int i;
3144
3145         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3146                 if (obj->funcs == dm->atomic_obj.funcs)
3147                         return to_dm_atomic_state(new_obj_state);
3148         }
3149
3150         return NULL;
3151 }
3152
/*
 * Private-obj .atomic_duplicate_state hook: clone the DM state wrapper
 * and deep-copy its DC context so the new state can be mutated during
 * atomic check without touching the committed one.
 * Returns NULL on allocation/copy failure.
 */
static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	/* Copies base private-state bookkeeping into the new wrapper. */
	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	/* Deep-copy the DC state (takes its own reference). */
	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	/* No usable context (missing old state or failed copy) aborts. */
	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}
3176
3177 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3178                                     struct drm_private_state *state)
3179 {
3180         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3181
3182         if (dm_state && dm_state->context)
3183                 dc_release_state(dm_state->context);
3184
3185         kfree(dm_state);
3186 }
3187
3188 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3189         .atomic_duplicate_state = dm_atomic_duplicate_state,
3190         .atomic_destroy_state = dm_atomic_destroy_state,
3191 };
3192
3193 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3194 {
3195         struct dm_atomic_state *state;
3196         int r;
3197
3198         adev->mode_info.mode_config_initialized = true;
3199
3200         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3201         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3202
3203         adev_to_drm(adev)->mode_config.max_width = 16384;
3204         adev_to_drm(adev)->mode_config.max_height = 16384;
3205
3206         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3207         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3208         /* indicates support for immediate flip */
3209         adev_to_drm(adev)->mode_config.async_page_flip = true;
3210
3211         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3212
3213         state = kzalloc(sizeof(*state), GFP_KERNEL);
3214         if (!state)
3215                 return -ENOMEM;
3216
3217         state->context = dc_create_state(adev->dm.dc);
3218         if (!state->context) {
3219                 kfree(state);
3220                 return -ENOMEM;
3221         }
3222
3223         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3224
3225         drm_atomic_private_obj_init(adev_to_drm(adev),
3226                                     &adev->dm.atomic_obj,
3227                                     &state->base,
3228                                     &dm_atomic_state_funcs);
3229
3230         r = amdgpu_display_modeset_create_props(adev);
3231         if (r) {
3232                 dc_release_state(state->context);
3233                 kfree(state);
3234                 return r;
3235         }
3236
3237         r = amdgpu_dm_audio_init(adev);
3238         if (r) {
3239                 dc_release_state(state->context);
3240                 kfree(state);
3241                 return r;
3242         }
3243
3244         return 0;
3245 }
3246
3247 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3248 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3249 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3250
3251 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3252         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3253
/*
 * Populate dm->backlight_caps (min/max input signal range) from ACPI,
 * falling back to the driver defaults when ACPI provides nothing.
 * Idempotent: returns early once caps_valid has been set.
 */
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	/* Already queried once; nothing to refresh. */
	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		/*
		 * NOTE(review): for AUX-capable panels the min/max input
		 * signal fields are deliberately not copied here —
		 * presumably the aux_* fields are filled in elsewhere;
		 * confirm against the ACPI caps consumer.
		 */
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		/* ACPI gave us nothing usable: use driver defaults. */
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	/* No ACPI: AUX panels keep their own range, others get defaults. */
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
3285
3286 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3287                                 unsigned *min, unsigned *max)
3288 {
3289         if (!caps)
3290                 return 0;
3291
3292         if (caps->aux_support) {
3293                 // Firmware limits are in nits, DC API wants millinits.
3294                 *max = 1000 * caps->aux_max_input_signal;
3295                 *min = 1000 * caps->aux_min_input_signal;
3296         } else {
3297                 // Firmware limits are 8-bit, PWM control is 16-bit.
3298                 *max = 0x101 * caps->max_input_signal;
3299                 *min = 0x101 * caps->min_input_signal;
3300         }
3301         return 1;
3302 }
3303
3304 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3305                                         uint32_t brightness)
3306 {
3307         unsigned min, max;
3308
3309         if (!get_brightness_range(caps, &min, &max))
3310                 return brightness;
3311
3312         // Rescale 0..255 to min..max
3313         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3314                                        AMDGPU_MAX_BL_LEVEL);
3315 }
3316
3317 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3318                                       uint32_t brightness)
3319 {
3320         unsigned min, max;
3321
3322         if (!get_brightness_range(caps, &min, &max))
3323                 return brightness;
3324
3325         if (brightness < min)
3326                 return 0;
3327         // Rescale min..max to 0..255
3328         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3329                                  max - min);
3330 }
3331
/*
 * backlight_ops.update_status hook: push bd->props.brightness to the
 * panel — via DP AUX (nits) when the caps say so, otherwise via the
 * conventional dc_link backlight path.
 *
 * Returns 0 on success, 1 on failure.
 * NOTE(review): the backlight core conventionally expects a negative
 * errno on failure; returning 1 still reads as an error but is
 * unconventional — confirm before changing.
 */
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	/* Map the 0..AMDGPU_MAX_BL_LEVEL user value onto the panel range. */
	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
	// Change brightness based on AUX property
	if (caps.aux_support)
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
	else
		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}
3355
/*
 * backlight_ops.get_brightness hook: read the current level back from
 * hardware and convert it to the 0..AMDGPU_MAX_BL_LEVEL user scale.
 * Falls back to the cached bd->props.brightness when the read fails.
 */
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	if (caps.aux_support) {
		struct dc_link *link = (struct dc_link *)dm->backlight_link;
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return bd->props.brightness;
		/* Report the average (not peak) luminance to userspace. */
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(dm->backlight_link);

		/* DC could not read the level; keep the cached value. */
		if (ret == DC_ERROR_UNEXPECTED)
			return bd->props.brightness;
		return convert_brightness_to_user(&caps, ret);
	}
}
3381
/*
 * Backlight class hooks; BL_CORE_SUSPENDRESUME lets the backlight core
 * handle suspend/resume of the device for us.
 */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};
3387
3388 static void
3389 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3390 {
3391         char bl_name[16];
3392         struct backlight_properties props = { 0 };
3393
3394         amdgpu_dm_update_backlight_caps(dm);
3395
3396         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3397         props.brightness = AMDGPU_MAX_BL_LEVEL;
3398         props.type = BACKLIGHT_RAW;
3399
3400         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3401                  adev_to_drm(dm->adev)->primary->index);
3402
3403         dm->backlight_dev = backlight_device_register(bl_name,
3404                                                       adev_to_drm(dm->adev)->dev,
3405                                                       dm,
3406                                                       &amdgpu_dm_backlight_ops,
3407                                                       &props);
3408
3409         if (IS_ERR(dm->backlight_dev))
3410                 DRM_ERROR("DM: Backlight registration failed!\n");
3411         else
3412                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3413 }
3414
3415 #endif
3416
3417 static int initialize_plane(struct amdgpu_display_manager *dm,
3418                             struct amdgpu_mode_info *mode_info, int plane_id,
3419                             enum drm_plane_type plane_type,
3420                             const struct dc_plane_cap *plane_cap)
3421 {
3422         struct drm_plane *plane;
3423         unsigned long possible_crtcs;
3424         int ret = 0;
3425
3426         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3427         if (!plane) {
3428                 DRM_ERROR("KMS: Failed to allocate plane\n");
3429                 return -ENOMEM;
3430         }
3431         plane->type = plane_type;
3432
3433         /*
3434          * HACK: IGT tests expect that the primary plane for a CRTC
3435          * can only have one possible CRTC. Only expose support for
3436          * any CRTC if they're not going to be used as a primary plane
3437          * for a CRTC - like overlay or underlay planes.
3438          */
3439         possible_crtcs = 1 << plane_id;
3440         if (plane_id >= dm->dc->caps.max_streams)
3441                 possible_crtcs = 0xff;
3442
3443         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3444
3445         if (ret) {
3446                 DRM_ERROR("KMS: Failed to initialize plane\n");
3447                 kfree(plane);
3448                 return ret;
3449         }
3450
3451         if (mode_info)
3452                 mode_info->planes[plane_id] = plane;
3453
3454         return ret;
3455 }
3456
3457
/*
 * Attach a backlight device to @link if it is an internal panel
 * (eDP/LVDS) with something actually connected.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		/* Remember which link drives the backlight, if we got one. */
		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
3478
3479
3480 /*
3481  * In this architecture, the association
3482  * connector -> encoder -> crtc
3483  * id not really requried. The crtc and connector will hold the
3484  * display_index as an abstraction to use with DAL component
3485  *
3486  * Returns 0 on success
3487  */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		/* Require a universal DCN plane that can blend both ways. */
		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	/* One CRTC per stream, paired with the primary plane of same index. */
	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connectors with nothing attached get an emulated link. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}


	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	/*
	 * NOTE(review): this frees only the connector/encoder pair from the
	 * failing (or last) loop iteration; objects registered in earlier
	 * iterations are presumably reclaimed later via
	 * amdgpu_dm_destroy_drm_device()'s mode-config cleanup — confirm.
	 */
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}
3675
3676 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3677 {
3678         drm_mode_config_cleanup(dm->ddev);
3679         drm_atomic_private_obj_fini(&dm->atomic_obj);
3680         return;
3681 }
3682
3683 /******************************************************************************
3684  * amdgpu_display_funcs functions
3685  *****************************************************************************/
3686
3687 /*
3688  * dm_bandwidth_update - program display watermarks
3689  *
3690  * @adev: amdgpu_device pointer
3691  *
3692  * Calculate and program the display watermarks and line buffer allocation.
3693  */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
	/* Intentionally a no-op stub; kept so the unconditional call site
	 * in the display funcs table (see dm_display_funcs) stays valid. */
}
3698
/*
 * amdgpu display callback table for the DC path. Entries set to NULL are
 * either never called for DC or handled by DAL's own VBIOS parsing.
 */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3712
3713 #if defined(CONFIG_DEBUG_KERNEL_DC)
3714
3715 static ssize_t s3_debug_store(struct device *device,
3716                               struct device_attribute *attr,
3717                               const char *buf,
3718                               size_t count)
3719 {
3720         int ret;
3721         int s3_state;
3722         struct drm_device *drm_dev = dev_get_drvdata(device);
3723         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3724
3725         ret = kstrtoint(buf, 0, &s3_state);
3726
3727         if (ret == 0) {
3728                 if (s3_state) {
3729                         dm_resume(adev);
3730                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3731                 } else
3732                         dm_suspend(adev);
3733         }
3734
3735         return ret == 0 ? count : 0;
3736 }
3737
3738 DEVICE_ATTR_WO(s3_debug);
3739
3740 #endif
3741
/*
 * dm_early_init - set the per-ASIC display topology (crtc/hpd/dig
 * counts), hook up the IRQ funcs and the display function table.
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Returns 0 on success, -EINVAL for unsupported ASICs.
 */
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_VANGOGH:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	/* Install the DC display funcs unless something set them earlier. */
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
3859
3860 static bool modeset_required(struct drm_crtc_state *crtc_state,
3861                              struct dc_stream_state *new_stream,
3862                              struct dc_stream_state *old_stream)
3863 {
3864         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3865 }
3866
3867 static bool modereset_required(struct drm_crtc_state *crtc_state)
3868 {
3869         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3870 }
3871
/* Encoder .destroy hook: release DRM state, then the allocation itself. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3877
/* Minimal encoder vtable: DM only needs teardown. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
3881
3882
/*
 * Look up the scaling limits DC allows for @fb's pixel format.
 *
 * @dev:           DRM device (used to reach the DC caps)
 * @fb:            framebuffer whose format selects the cap entry
 * @min_downscale: out, minimum downscale factor in 1/1000 units
 * @max_upscale:   out, maximum upscale factor in 1/1000 units
 */
static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	/* YUV formats use the nv12 cap entry. */
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	/* 16-bit float formats use the fp16 cap entry. */
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	/* Everything else falls back to the argb8888 cap entry. */
	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}
3924
3925
/*
 * Translate DRM plane state (16.16 fixed-point source rect, integer
 * destination rect) into a DC scaling_info, validating the implied
 * scaling factors against the per-format DC plane caps.
 *
 * Returns 0 on success, -EINVAL for degenerate rects or out-of-range
 * scaling.
 */
static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/* Zero-sized source or destination rects cannot be programmed. */
	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		/* No plane/fb to query: fall back to 0.25x..16x limits. */
		min_downscale = 250;
		max_upscale = 16000;
	}

	/* Factors are in 1/1000 units; 1000 == 1.0 (no scaling). */
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
3989
3990 static void
3991 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3992                                  uint64_t tiling_flags)
3993 {
3994         /* Fill GFX8 params */
3995         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3996                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3997
3998                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3999                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4000                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4001                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4002                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4003
4004                 /* XXX fix me for VI */
4005                 tiling_info->gfx8.num_banks = num_banks;
4006                 tiling_info->gfx8.array_mode =
4007                                 DC_ARRAY_2D_TILED_THIN1;
4008                 tiling_info->gfx8.tile_split = tile_split;
4009                 tiling_info->gfx8.bank_width = bankw;
4010                 tiling_info->gfx8.bank_height = bankh;
4011                 tiling_info->gfx8.tile_aspect = mtaspect;
4012                 tiling_info->gfx8.tile_mode =
4013                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4014         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4015                         == DC_ARRAY_1D_TILED_THIN1) {
4016                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4017         }
4018
4019         tiling_info->gfx8.pipe_config =
4020                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4021 }
4022
4023 static void
4024 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4025                                   union dc_tiling_info *tiling_info)
4026 {
4027         tiling_info->gfx9.num_pipes =
4028                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4029         tiling_info->gfx9.num_banks =
4030                 adev->gfx.config.gb_addr_config_fields.num_banks;
4031         tiling_info->gfx9.pipe_interleave =
4032                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4033         tiling_info->gfx9.num_shader_engines =
4034                 adev->gfx.config.gb_addr_config_fields.num_se;
4035         tiling_info->gfx9.max_compressed_frags =
4036                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4037         tiling_info->gfx9.num_rb_per_se =
4038                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4039         tiling_info->gfx9.shaderEnable = 1;
4040         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4041             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4042             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4043             adev->asic_type == CHIP_VANGOGH)
4044                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4045 }
4046
/*
 * Check a requested DCC (Delta Color Compression) configuration against
 * what DC reports as supported for this surface.
 *
 * Returns 0 when DCC is disabled or the configuration is acceptable,
 * -EINVAL when DCC is requested but cannot be supported (video formats,
 * missing capability callback, incapable surface, or an independent-64B
 * blocks mismatch).  @address is currently unused here.
 */
static int
validate_dcc(struct amdgpu_device *adev,
             const enum surface_pixel_format format,
             const enum dc_rotation_angle rotation,
             const union dc_tiling_info *tiling_info,
             const struct dc_plane_dcc_param *dcc,
             const struct dc_plane_address *address,
             const struct plane_size *plane_size)
{
        struct dc *dc = adev->dm.dc;
        struct dc_dcc_surface_param input;
        struct dc_surface_dcc_cap output;

        memset(&input, 0, sizeof(input));
        memset(&output, 0, sizeof(output));

        /* Nothing to validate when the plane does not use DCC. */
        if (!dcc->enable)
                return 0;

        /* DCC is not supported on video (YUV) formats. */
        if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
            !dc->cap_funcs.get_dcc_compression_cap)
                return -EINVAL;

        input.format = format;
        input.surface_size.width = plane_size->surface_size.width;
        input.surface_size.height = plane_size->surface_size.height;
        input.swizzle_mode = tiling_info->gfx9.swizzle;

        /* Scan direction follows the plane rotation. */
        if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
                input.scan = SCAN_DIRECTION_HORIZONTAL;
        else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
                input.scan = SCAN_DIRECTION_VERTICAL;

        if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
                return -EINVAL;

        if (!output.capable)
                return -EINVAL;

        /*
         * If the hardware requires independent 64B blocks but the modifier
         * did not request them, the combination is invalid.
         */
        if (dcc->independent_64b_blks == 0 &&
            output.grph.rgb.independent_64b_blks != 0)
                return -EINVAL;

        return 0;
}
4092
4093 static bool
4094 modifier_has_dcc(uint64_t modifier)
4095 {
4096         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4097 }
4098
4099 static unsigned
4100 modifier_gfx9_swizzle_mode(uint64_t modifier)
4101 {
4102         if (modifier == DRM_FORMAT_MOD_LINEAR)
4103                 return 0;
4104
4105         return AMD_FMT_MOD_GET(TILE, modifier);
4106 }
4107
4108 static const struct drm_format_info *
4109 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4110 {
4111         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4112 }
4113
4114 static void
4115 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4116                                     union dc_tiling_info *tiling_info,
4117                                     uint64_t modifier)
4118 {
4119         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4120         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4121         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4122         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4123
4124         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4125
4126         if (!IS_AMD_FMT_MOD(modifier))
4127                 return;
4128
4129         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4130         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4131
4132         if (adev->family >= AMDGPU_FAMILY_NV) {
4133                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4134         } else {
4135                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4136
4137                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4138         }
4139 }
4140
/*
 * Micro-tile swizzle class, encoded in the low two bits of the GFX9+
 * swizzle mode (see modifier_gfx9_swizzle_mode() & 3 in
 * dm_plane_format_mod_supported()).
 */
enum dm_micro_swizzle {
        MICRO_SWIZZLE_Z = 0,
        MICRO_SWIZZLE_S = 1,
        MICRO_SWIZZLE_D = 2,
        MICRO_SWIZZLE_R = 3
};
4147
4148 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4149                                           uint32_t format,
4150                                           uint64_t modifier)
4151 {
4152         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4153         const struct drm_format_info *info = drm_format_info(format);
4154
4155         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4156
4157         if (!info)
4158                 return false;
4159
4160         /*
4161          * We always have to allow this modifier, because core DRM still
4162          * checks LINEAR support if userspace does not provide modifers.
4163          */
4164         if (modifier == DRM_FORMAT_MOD_LINEAR)
4165                 return true;
4166
4167         /*
4168          * The arbitrary tiling support for multiplane formats has not been hooked
4169          * up.
4170          */
4171         if (info->num_planes > 1)
4172                 return false;
4173
4174         /*
4175          * For D swizzle the canonical modifier depends on the bpp, so check
4176          * it here.
4177          */
4178         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4179             adev->family >= AMDGPU_FAMILY_NV) {
4180                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4181                         return false;
4182         }
4183
4184         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4185             info->cpp[0] < 8)
4186                 return false;
4187
4188         if (modifier_has_dcc(modifier)) {
4189                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4190                 if (info->cpp[0] != 4)
4191                         return false;
4192         }
4193
4194         return true;
4195 }
4196
4197 static void
4198 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4199 {
4200         if (!*mods)
4201                 return;
4202
4203         if (*cap - *size < 1) {
4204                 uint64_t new_cap = *cap * 2;
4205                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4206
4207                 if (!new_mods) {
4208                         kfree(*mods);
4209                         *mods = NULL;
4210                         return;
4211                 }
4212
4213                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4214                 kfree(*mods);
4215                 *mods = new_mods;
4216                 *cap = new_cap;
4217         }
4218
4219         (*mods)[*size] = mod;
4220         *size += 1;
4221 }
4222
/*
 * Advertise the GFX9 (Vega/Raven family) format modifiers, most
 * preferred first.  DCC variants are only added for the Raven family;
 * whether constant-encode is available depends on the ASIC revision.
 * The caller appends LINEAR and the INVALID terminator.
 */
static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
                   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
        int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
        int pipe_xor_bits = min(8, pipes +
                                ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
        int bank_xor_bits = min(8 - pipe_xor_bits,
                                ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
        int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
                 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);


        if (adev->family == AMDGPU_FAMILY_RV) {
                /* Raven2 and later */
                bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

                /*
                 * No _D DCC swizzles yet because we only allow 32bpp, which
                 * doesn't support _D on DCN
                 */

                /* Most preferred: DCC with constant encode, when available. */
                if (has_constant_encode) {
                        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                                    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
                                    AMD_FMT_MOD_SET(DCC, 1) |
                                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
                                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
                }

                /* Same DCC layout without constant encode. */
                add_modifier(mods, size, capacity, AMD_FMT_MOD |
                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                            AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                            AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
                            AMD_FMT_MOD_SET(DCC, 1) |
                            AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                            AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
                            AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

                /* DCC with retile (displayable copy), constant encode. */
                if (has_constant_encode) {
                        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                                    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
                                    AMD_FMT_MOD_SET(DCC, 1) |
                                    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
                                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |

                                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                                    AMD_FMT_MOD_SET(RB, rb) |
                                    AMD_FMT_MOD_SET(PIPE, pipes));
                }

                /* DCC with retile, no constant encode. */
                add_modifier(mods, size, capacity, AMD_FMT_MOD |
                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                            AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                            AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
                            AMD_FMT_MOD_SET(DCC, 1) |
                            AMD_FMT_MOD_SET(DCC_RETILE, 1) |
                            AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                            AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
                            AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
                            AMD_FMT_MOD_SET(RB, rb) |
                            AMD_FMT_MOD_SET(PIPE, pipes));
        }

        /*
         * Only supported for 64bpp on Raven, will be filtered on format in
         * dm_plane_format_mod_supported.
         */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

        if (adev->family == AMDGPU_FAMILY_RV) {
                add_modifier(mods, size, capacity, AMD_FMT_MOD |
                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
                            AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                            AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
        }

        /*
         * Only supported for 64bpp on Raven, will be filtered on format in
         * dm_plane_format_mod_supported.
         */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

        if (adev->family == AMDGPU_FAMILY_RV) {
                add_modifier(mods, size, capacity, AMD_FMT_MOD |
                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
        }
}
4329
/*
 * Advertise the GFX10.1 (Navi 10 class) format modifiers, most
 * preferred (DCC) first.  The caller appends LINEAR and the INVALID
 * terminator.
 */
static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
                      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
        int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

        /* DCC */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(DCC, 1) |
                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

        /* DCC with retile (displayable copy) */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(DCC, 1) |
                    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

        /* Uncompressed tiled layouts */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));


        /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
4375
/*
 * Advertise the GFX10.3 (Sienna Cichlid class, RB+) format modifiers,
 * most preferred (128B-block DCC) first.  The caller appends LINEAR and
 * the INVALID terminator.
 */
static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
                      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
        int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
        int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

        /* DCC */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(PACKERS, pkrs) |
                    AMD_FMT_MOD_SET(DCC, 1) |
                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

        /* DCC with retile (displayable copy) */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(PACKERS, pkrs) |
                    AMD_FMT_MOD_SET(DCC, 1) |
                    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

        /* Uncompressed tiled layouts */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(PACKERS, pkrs));

        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
                    AMD_FMT_MOD_SET(PACKERS, pkrs));

        /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
4427
4428 static int
4429 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4430 {
4431         uint64_t size = 0, capacity = 128;
4432         *mods = NULL;
4433
4434         /* We have not hooked up any pre-GFX9 modifiers. */
4435         if (adev->family < AMDGPU_FAMILY_AI)
4436                 return 0;
4437
4438         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4439
4440         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4441                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4442                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4443                 return *mods ? 0 : -ENOMEM;
4444         }
4445
4446         switch (adev->family) {
4447         case AMDGPU_FAMILY_AI:
4448         case AMDGPU_FAMILY_RV:
4449                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4450                 break;
4451         case AMDGPU_FAMILY_NV:
4452         case AMDGPU_FAMILY_VGH:
4453                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4454                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4455                 else
4456                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4457                 break;
4458         }
4459
4460         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4461
4462         /* INVALID marks the end of the list. */
4463         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4464
4465         if (!*mods)
4466                 return -ENOMEM;
4467
4468         return 0;
4469 }
4470
4471 static int
4472 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4473                                           const struct amdgpu_framebuffer *afb,
4474                                           const enum surface_pixel_format format,
4475                                           const enum dc_rotation_angle rotation,
4476                                           const struct plane_size *plane_size,
4477                                           union dc_tiling_info *tiling_info,
4478                                           struct dc_plane_dcc_param *dcc,
4479                                           struct dc_plane_address *address,
4480                                           const bool force_disable_dcc)
4481 {
4482         const uint64_t modifier = afb->base.modifier;
4483         int ret;
4484
4485         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4486         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4487
4488         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4489                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4490
4491                 dcc->enable = 1;
4492                 dcc->meta_pitch = afb->base.pitches[1];
4493                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4494
4495                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4496                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4497         }
4498
4499         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4500         if (ret)
4501                 return ret;
4502
4503         return 0;
4504 }
4505
/*
 * Fill the DC buffer attributes (tiling, size, DCC, scanout addresses)
 * for a framebuffer.  RGB formats use a single graphics address; video
 * (YUV semi-planar) formats use separate luma/chroma addresses.  On
 * GFX9+ tiling and DCC come from the format modifier, on older parts
 * from the legacy tiling flags.
 *
 * Returns 0 on success or a negative error code from the GFX9+ path.
 */
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
                             const struct amdgpu_framebuffer *afb,
                             const enum surface_pixel_format format,
                             const enum dc_rotation_angle rotation,
                             const uint64_t tiling_flags,
                             union dc_tiling_info *tiling_info,
                             struct plane_size *plane_size,
                             struct dc_plane_dcc_param *dcc,
                             struct dc_plane_address *address,
                             bool tmz_surface,
                             bool force_disable_dcc)
{
        const struct drm_framebuffer *fb = &afb->base;
        int ret;

        /* Start from a clean slate; unset fields stay zero. */
        memset(tiling_info, 0, sizeof(*tiling_info));
        memset(plane_size, 0, sizeof(*plane_size));
        memset(dcc, 0, sizeof(*dcc));
        memset(address, 0, sizeof(*address));

        address->tmz_surface = tmz_surface;

        if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
                /* Single-plane RGB surface. */
                uint64_t addr = afb->address + fb->offsets[0];

                plane_size->surface_size.x = 0;
                plane_size->surface_size.y = 0;
                plane_size->surface_size.width = fb->width;
                plane_size->surface_size.height = fb->height;
                /* DC pitch is in pixels, DRM pitch in bytes. */
                plane_size->surface_pitch =
                        fb->pitches[0] / fb->format->cpp[0];

                address->type = PLN_ADDR_TYPE_GRAPHICS;
                address->grph.addr.low_part = lower_32_bits(addr);
                address->grph.addr.high_part = upper_32_bits(addr);
        } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
                /* Semi-planar video surface: plane 0 luma, plane 1 chroma. */
                uint64_t luma_addr = afb->address + fb->offsets[0];
                uint64_t chroma_addr = afb->address + fb->offsets[1];

                plane_size->surface_size.x = 0;
                plane_size->surface_size.y = 0;
                plane_size->surface_size.width = fb->width;
                plane_size->surface_size.height = fb->height;
                plane_size->surface_pitch =
                        fb->pitches[0] / fb->format->cpp[0];

                plane_size->chroma_size.x = 0;
                plane_size->chroma_size.y = 0;
                /* TODO: set these based on surface format */
                plane_size->chroma_size.width = fb->width / 2;
                plane_size->chroma_size.height = fb->height / 2;

                plane_size->chroma_pitch =
                        fb->pitches[1] / fb->format->cpp[1];

                address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
                address->video_progressive.luma_addr.low_part =
                        lower_32_bits(luma_addr);
                address->video_progressive.luma_addr.high_part =
                        upper_32_bits(luma_addr);
                address->video_progressive.chroma_addr.low_part =
                        lower_32_bits(chroma_addr);
                address->video_progressive.chroma_addr.high_part =
                        upper_32_bits(chroma_addr);
        }

        if (adev->family >= AMDGPU_FAMILY_AI) {
                /* GFX9+: tiling and DCC are encoded in the modifier. */
                ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
                                                                rotation, plane_size,
                                                                tiling_info, dcc,
                                                                address,
                                                                force_disable_dcc);
                if (ret)
                        return ret;
        } else {
                /* Pre-GFX9: decode the legacy tiling flags. */
                fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
        }

        return 0;
}
4587
4588 static void
4589 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4590                                bool *per_pixel_alpha, bool *global_alpha,
4591                                int *global_alpha_value)
4592 {
4593         *per_pixel_alpha = false;
4594         *global_alpha = false;
4595         *global_alpha_value = 0xff;
4596
4597         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4598                 return;
4599
4600         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4601                 static const uint32_t alpha_formats[] = {
4602                         DRM_FORMAT_ARGB8888,
4603                         DRM_FORMAT_RGBA8888,
4604                         DRM_FORMAT_ABGR8888,
4605                 };
4606                 uint32_t format = plane_state->fb->format->format;
4607                 unsigned int i;
4608
4609                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4610                         if (format == alpha_formats[i]) {
4611                                 *per_pixel_alpha = true;
4612                                 break;
4613                         }
4614                 }
4615         }
4616
4617         if (plane_state->alpha < 0xffff) {
4618                 *global_alpha = true;
4619                 *global_alpha_value = plane_state->alpha >> 8;
4620         }
4621 }
4622
4623 static int
4624 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4625                             const enum surface_pixel_format format,
4626                             enum dc_color_space *color_space)
4627 {
4628         bool full_range;
4629
4630         *color_space = COLOR_SPACE_SRGB;
4631
4632         /* DRM color properties only affect non-RGB formats. */
4633         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4634                 return 0;
4635
4636         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4637
4638         switch (plane_state->color_encoding) {
4639         case DRM_COLOR_YCBCR_BT601:
4640                 if (full_range)
4641                         *color_space = COLOR_SPACE_YCBCR601;
4642                 else
4643                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4644                 break;
4645
4646         case DRM_COLOR_YCBCR_BT709:
4647                 if (full_range)
4648                         *color_space = COLOR_SPACE_YCBCR709;
4649                 else
4650                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4651                 break;
4652
4653         case DRM_COLOR_YCBCR_BT2020:
4654                 if (full_range)
4655                         *color_space = COLOR_SPACE_2020_YCBCR;
4656                 else
4657                         return -EINVAL;
4658                 break;
4659
4660         default:
4661                 return -EINVAL;
4662         }
4663
4664         return 0;
4665 }
4666
/*
 * Translate a DRM plane state into a DC plane description and surface
 * address.
 * @adev: device the plane belongs to
 * @plane_state: source DRM plane state (must have a framebuffer attached)
 * @tiling_flags: tiling flags of the framebuffer's BO
 * @plane_info: output; fully overwritten (memset to zero first)
 * @address: output; filled by fill_plane_buffer_attributes()
 * @tmz_surface: surface resides in TMZ (encrypted) memory
 * @force_disable_dcc: disable DCC regardless of the framebuffer state
 *
 * Returns 0 on success, -EINVAL for an unsupported pixel format, or the
 * error code propagated from the color/buffer attribute helpers.
 */
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	/* Map the DRM fourcc onto the matching DC surface pixel format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	/*
	 * Only the pure rotation bits are translated; reflection bits are
	 * masked off. Unknown/combined values fall back to no rotation.
	 */
	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	/* Tiling, DCC and the surface address are derived from the BO. */
	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
4775
4776 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4777                                     struct dc_plane_state *dc_plane_state,
4778                                     struct drm_plane_state *plane_state,
4779                                     struct drm_crtc_state *crtc_state)
4780 {
4781         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4782         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4783         struct dc_scaling_info scaling_info;
4784         struct dc_plane_info plane_info;
4785         int ret;
4786         bool force_disable_dcc = false;
4787
4788         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4789         if (ret)
4790                 return ret;
4791
4792         dc_plane_state->src_rect = scaling_info.src_rect;
4793         dc_plane_state->dst_rect = scaling_info.dst_rect;
4794         dc_plane_state->clip_rect = scaling_info.clip_rect;
4795         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4796
4797         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4798         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4799                                           afb->tiling_flags,
4800                                           &plane_info,
4801                                           &dc_plane_state->address,
4802                                           afb->tmz_surface,
4803                                           force_disable_dcc);
4804         if (ret)
4805                 return ret;
4806
4807         dc_plane_state->format = plane_info.format;
4808         dc_plane_state->color_space = plane_info.color_space;
4809         dc_plane_state->format = plane_info.format;
4810         dc_plane_state->plane_size = plane_info.plane_size;
4811         dc_plane_state->rotation = plane_info.rotation;
4812         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4813         dc_plane_state->stereo_format = plane_info.stereo_format;
4814         dc_plane_state->tiling_info = plane_info.tiling_info;
4815         dc_plane_state->visible = plane_info.visible;
4816         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4817         dc_plane_state->global_alpha = plane_info.global_alpha;
4818         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4819         dc_plane_state->dcc = plane_info.dcc;
4820         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4821         dc_plane_state->flip_int_enabled = true;
4822
4823         /*
4824          * Always set input transfer function, since plane state is refreshed
4825          * every time.
4826          */
4827         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4828         if (ret)
4829                 return ret;
4830
4831         return 0;
4832 }
4833
/*
 * Compute the stream's source (viewport) and destination rectangles from
 * the mode and the connector's scaling/underscan settings.
 *
 * @mode: mode being set; if NULL nothing is updated
 * @dm_state: connector state carrying scaling + underscan; may be NULL,
 *            in which case full-screen scaling is used
 * @stream: stream whose src/dst rectangles are written
 */
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space*/
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			/* Shrink one dst dimension to preserve the src aspect ratio. */
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			/* No scaling: display the source 1:1, centered below. */
			dst = src;
		}

		/* Center the (possibly shrunk) destination in the addressable area. */
		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		/* Underscan borders shrink the destination symmetrically. */
		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}
4888
4889 static enum dc_color_depth
4890 convert_color_depth_from_display_info(const struct drm_connector *connector,
4891                                       bool is_y420, int requested_bpc)
4892 {
4893         uint8_t bpc;
4894
4895         if (is_y420) {
4896                 bpc = 8;
4897
4898                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4899                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4900                         bpc = 16;
4901                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4902                         bpc = 12;
4903                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4904                         bpc = 10;
4905         } else {
4906                 bpc = (uint8_t)connector->display_info.bpc;
4907                 /* Assume 8 bpc by default if no bpc is specified. */
4908                 bpc = bpc ? bpc : 8;
4909         }
4910
4911         if (requested_bpc > 0) {
4912                 /*
4913                  * Cap display bpc based on the user requested value.
4914                  *
4915                  * The value for state->max_bpc may not correctly updated
4916                  * depending on when the connector gets added to the state
4917                  * or if this was called outside of atomic check, so it
4918                  * can't be used directly.
4919                  */
4920                 bpc = min_t(u8, bpc, requested_bpc);
4921
4922                 /* Round down to the nearest even number. */
4923                 bpc = bpc - (bpc & 1);
4924         }
4925
4926         switch (bpc) {
4927         case 0:
4928                 /*
4929                  * Temporary Work around, DRM doesn't parse color depth for
4930                  * EDID revision before 1.4
4931                  * TODO: Fix edid parsing
4932                  */
4933                 return COLOR_DEPTH_888;
4934         case 6:
4935                 return COLOR_DEPTH_666;
4936         case 8:
4937                 return COLOR_DEPTH_888;
4938         case 10:
4939                 return COLOR_DEPTH_101010;
4940         case 12:
4941                 return COLOR_DEPTH_121212;
4942         case 14:
4943                 return COLOR_DEPTH_141414;
4944         case 16:
4945                 return COLOR_DEPTH_161616;
4946         default:
4947                 return COLOR_DEPTH_UNDEFINED;
4948         }
4949 }
4950
4951 static enum dc_aspect_ratio
4952 get_aspect_ratio(const struct drm_display_mode *mode_in)
4953 {
4954         /* 1-1 mapping, since both enums follow the HDMI spec. */
4955         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4956 }
4957
4958 static enum dc_color_space
4959 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4960 {
4961         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4962
4963         switch (dc_crtc_timing->pixel_encoding) {
4964         case PIXEL_ENCODING_YCBCR422:
4965         case PIXEL_ENCODING_YCBCR444:
4966         case PIXEL_ENCODING_YCBCR420:
4967         {
4968                 /*
4969                  * 27030khz is the separation point between HDTV and SDTV
4970                  * according to HDMI spec, we use YCbCr709 and YCbCr601
4971                  * respectively
4972                  */
4973                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4974                         if (dc_crtc_timing->flags.Y_ONLY)
4975                                 color_space =
4976                                         COLOR_SPACE_YCBCR709_LIMITED;
4977                         else
4978                                 color_space = COLOR_SPACE_YCBCR709;
4979                 } else {
4980                         if (dc_crtc_timing->flags.Y_ONLY)
4981                                 color_space =
4982                                         COLOR_SPACE_YCBCR601_LIMITED;
4983                         else
4984                                 color_space = COLOR_SPACE_YCBCR601;
4985                 }
4986
4987         }
4988         break;
4989         case PIXEL_ENCODING_RGB:
4990                 color_space = COLOR_SPACE_SRGB;
4991                 break;
4992
4993         default:
4994                 WARN_ON(1);
4995                 break;
4996         }
4997
4998         return color_space;
4999 }
5000
/*
 * Lower the timing's color depth until the resulting pixel clock fits the
 * sink's maximum TMDS clock (HDMI).
 *
 * Starts from the currently requested depth and walks downward through the
 * enum until COLOR_DEPTH_666. Returns true (and writes the chosen depth
 * back into @timing_out) when a depth fits; false when none does or when
 * the depth is one of the values invalid for HDMI.
 */
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		/* pix_clk_100hz / 10 gives the clock in kHz, matching max_tmds_clock. */
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
		/* Try the next lower depth in the enum until 6 bpc is reached. */
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
5036
/*
 * Fill a DC stream's timing and color properties from a DRM display mode.
 *
 * @stream: stream whose timing/output properties are written
 * @mode_in: mode to translate
 * @connector: connector the mode is set on (display_info consulted)
 * @connector_state: atomic connector state (currently unused here)
 * @old_stream: if non-NULL, its VIC and sync polarities are carried over
 * @requested_bpc: user bpc cap passed to the color-depth helper
 */
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	/*
	 * Pixel encoding selection: 4:2:0 when required (or forced) on HDMI,
	 * else 4:4:4 when the sink advertises it on HDMI, else RGB.
	 */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	/* Reuse VIC and sync polarity from the old stream when present. */
	if(old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	/* On HDMI, take VIC / HDMI-VIC from the generated infoframes instead. */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	/* Copy the raw mode timing; porches/widths derived from sync positions. */
	timing_out->h_addressable = mode_in->hdisplay;
	timing_out->h_total = mode_in->htotal;
	timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
	timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
	timing_out->v_total = mode_in->vtotal;
	timing_out->v_addressable = mode_in->vdisplay;
	timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
	timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
	timing_out->pix_clk_100hz = mode_in->clock * 10;

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	/*
	 * If the depth doesn't fit the TMDS clock at the current encoding,
	 * retry once with 4:2:0 (when the mode supports it) to halve the
	 * effective clock.
	 */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
5123
5124 static void fill_audio_info(struct audio_info *audio_info,
5125                             const struct drm_connector *drm_connector,
5126                             const struct dc_sink *dc_sink)
5127 {
5128         int i = 0;
5129         int cea_revision = 0;
5130         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5131
5132         audio_info->manufacture_id = edid_caps->manufacturer_id;
5133         audio_info->product_id = edid_caps->product_id;
5134
5135         cea_revision = drm_connector->display_info.cea_rev;
5136
5137         strscpy(audio_info->display_name,
5138                 edid_caps->display_name,
5139                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5140
5141         if (cea_revision >= 3) {
5142                 audio_info->mode_count = edid_caps->audio_mode_count;
5143
5144                 for (i = 0; i < audio_info->mode_count; ++i) {
5145                         audio_info->modes[i].format_code =
5146                                         (enum audio_format_code)
5147                                         (edid_caps->audio_modes[i].format_code);
5148                         audio_info->modes[i].channel_count =
5149                                         edid_caps->audio_modes[i].channel_count;
5150                         audio_info->modes[i].sample_rates.all =
5151                                         edid_caps->audio_modes[i].sample_rate;
5152                         audio_info->modes[i].sample_size =
5153                                         edid_caps->audio_modes[i].sample_size;
5154                 }
5155         }
5156
5157         audio_info->flags.all = edid_caps->speaker_flags;
5158
5159         /* TODO: We only check for the progressive mode, check for interlace mode too */
5160         if (drm_connector->latency_present[0]) {
5161                 audio_info->video_latency = drm_connector->video_latency[0];
5162                 audio_info->audio_latency = drm_connector->audio_latency[0];
5163         }
5164
5165         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5166
5167 }
5168
5169 static void
5170 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5171                                       struct drm_display_mode *dst_mode)
5172 {
5173         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5174         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5175         dst_mode->crtc_clock = src_mode->crtc_clock;
5176         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5177         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5178         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5179         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5180         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5181         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5182         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5183         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5184         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5185         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5186         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5187 }
5188
5189 static void
5190 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5191                                         const struct drm_display_mode *native_mode,
5192                                         bool scale_enabled)
5193 {
5194         if (scale_enabled) {
5195                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5196         } else if (native_mode->clock == drm_mode->clock &&
5197                         native_mode->htotal == drm_mode->htotal &&
5198                         native_mode->vtotal == drm_mode->vtotal) {
5199                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5200         } else {
5201                 /* no scaling nor amdgpu inserted, no need to patch */
5202         }
5203 }
5204
5205 static struct dc_sink *
5206 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5207 {
5208         struct dc_sink_init_data sink_init_data = { 0 };
5209         struct dc_sink *sink = NULL;
5210         sink_init_data.link = aconnector->dc_link;
5211         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5212
5213         sink = dc_sink_create(&sink_init_data);
5214         if (!sink) {
5215                 DRM_ERROR("Failed to create sink!\n");
5216                 return NULL;
5217         }
5218         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5219
5220         return sink;
5221 }
5222
5223 static void set_multisync_trigger_params(
5224                 struct dc_stream_state *stream)
5225 {
5226         if (stream->triggered_crtc_reset.enabled) {
5227                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5228                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5229         }
5230 }
5231
5232 static void set_master_stream(struct dc_stream_state *stream_set[],
5233                               int stream_count)
5234 {
5235         int j, highest_rfr = 0, master_stream = 0;
5236
5237         for (j = 0;  j < stream_count; j++) {
5238                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5239                         int refresh_rate = 0;
5240
5241                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5242                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5243                         if (refresh_rate > highest_rfr) {
5244                                 highest_rfr = refresh_rate;
5245                                 master_stream = j;
5246                         }
5247                 }
5248         }
5249         for (j = 0;  j < stream_count; j++) {
5250                 if (stream_set[j])
5251                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5252         }
5253 }
5254
5255 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5256 {
5257         int i = 0;
5258
5259         if (context->stream_count < 2)
5260                 return;
5261         for (i = 0; i < context->stream_count ; i++) {
5262                 if (!context->streams[i])
5263                         continue;
5264                 /*
5265                  * TODO: add a function to read AMD VSDB bits and set
5266                  * crtc_sync_master.multi_sync_enabled flag
5267                  * For now it's set to false
5268                  */
5269                 set_multisync_trigger_params(context->streams[i]);
5270         }
5271         set_master_stream(context->streams, context->stream_count);
5272 }
5273
/*
 * Find the mode with the highest refresh rate at the preferred mode's
 * resolution, for use as the freesync video base mode.
 *
 * @aconnector: connector whose mode lists are searched
 * @use_probed_modes: search base.probed_modes instead of base.modes
 *
 * The result is cached in aconnector->freesync_vid_base (a mode with a
 * non-zero clock means a previous result exists) and returned from the
 * cache on later calls. Returns NULL if no mode at all is available.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			  bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
						    &aconnector->base.probed_modes :
						    &aconnector->base.modes;

	/* Return the cached result if one was computed before. */
	if (aconnector->freesync_vid_base.clock != 0)
		return &aconnector->freesync_vid_base;

	/* Find the preferred mode */
	list_for_each_entry (m, list_head, head) {
		if (m->type & DRM_MODE_TYPE_PREFERRED) {
			m_pref = m;
			break;
		}
	}

	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fallback to first entry */
		m_pref = list_first_entry_or_null(
			&aconnector->base.modes, struct drm_display_mode, head);
		if (!m_pref) {
			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
			return NULL;
		}
	}

	highest_refresh = drm_mode_vrefresh(m_pref);

	/*
	 * Find the mode with highest refresh rate with same resolution.
	 * For some monitors, preferred mode is not the mode with highest
	 * supported refresh rate.
	 */
	list_for_each_entry (m, list_head, head) {
		current_refresh  = drm_mode_vrefresh(m);

		if (m->hdisplay == m_pref->hdisplay &&
		    m->vdisplay == m_pref->vdisplay &&
		    highest_refresh < current_refresh) {
			highest_refresh = current_refresh;
			m_pref = m;
		}
	}

	/* Cache the winner for subsequent calls. */
	aconnector->freesync_vid_base = *m_pref;
	return m_pref;
}
5326
5327 static bool is_freesync_video_mode(struct drm_display_mode *mode,
5328                                    struct amdgpu_dm_connector *aconnector)
5329 {
5330         struct drm_display_mode *high_mode;
5331         int timing_diff;
5332
5333         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5334         if (!high_mode || !mode)
5335                 return false;
5336
5337         timing_diff = high_mode->vtotal - mode->vtotal;
5338
5339         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5340             high_mode->hdisplay != mode->hdisplay ||
5341             high_mode->vdisplay != mode->vdisplay ||
5342             high_mode->hsync_start != mode->hsync_start ||
5343             high_mode->hsync_end != mode->hsync_end ||
5344             high_mode->htotal != mode->htotal ||
5345             high_mode->hskew != mode->hskew ||
5346             high_mode->vscan != mode->vscan ||
5347             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5348             high_mode->vsync_end - mode->vsync_end != timing_diff)
5349                 return false;
5350         else
5351                 return true;
5352 }
5353
/*
 * Build a dc_stream_state for @aconnector driving @drm_mode.
 *
 * @aconnector:    connector the stream is created for; must be non-NULL.
 * @drm_mode:      requested display mode (copied, not modified).
 * @dm_state:      DM connector state providing scaling settings; may be NULL.
 * @old_stream:    previous stream whose timing properties (vic/polarities)
 *                 are reused when scaling is active and the refresh rate is
 *                 unchanged; may be NULL.
 * @requested_bpc: bpc cap passed down to timing/property fill; callers retry
 *                 with lower values on validation failure.
 *
 * Returns a new stream with one reference owned by the caller, or NULL on
 * failure.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	struct drm_display_mode saved_mode;
	struct drm_display_mode *freesync_mode = NULL;
	bool native_mode_found = false;
	/* Non-OFF scaling always forces a timing recalculation. */
	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
	uint32_t link_bandwidth_kbps;
#endif
	struct dc_sink *sink = NULL;

	memset(&saved_mode, 0, sizeof(saved_mode));

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	/*
	 * With no real sink (e.g. headless/forced-on connector) use a fake
	 * virtual sink so a stream can still be created; otherwise take a
	 * reference on the real sink, released at "finish".
	 */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	/* Search for the connector's preferred mode. */
	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	/* No preferred flag anywhere: fall back to the first listed mode. */
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		/*
		 * Freesync-video modes also require recalculation: the stream
		 * timing is taken from the base (highest-refresh) mode while
		 * the requested mode is stashed in saved_mode for crtcinfo.
		 */
		recalculate_timing |= amdgpu_freesync_vid_mode &&
				 is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			saved_mode = mode;
			mode = *freesync_mode;
		} else {
			decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		}

		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);
	else
		drm_mode_set_crtcinfo(&mode, 0);

       /*
	* If scaling is enabled and refresh rate didn't change
	* we copy the vic and polarities of the old timings
	*/
	if (!recalculate_timing || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, NULL,
			requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, old_stream,
			requested_bpc);

	stream->timing.flags.DSC = 0;

	/* DSC is only considered for DP sinks (DCN hardware only). */
	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
			/* Set DSC policy according to dsc_clock_en */
			dc_dsc_policy_set_enable_dsc_when_not_needed(
				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  0,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
			/* Overwrite the stream flag if DSC is enabled through debugfs */
			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
				stream->timing.flags.DSC = 1;

			/* debugfs overrides for slice count / bpp, applied on top */
			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
		}
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		//
		// should decide stream support vsc sdp colorimetry capability
		// before building vsc info packet
		//
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	/* Drop the reference taken (or the fake sink's initial ref) above. */
	dc_sink_release(sink);

	return stream;
}
5538
/* drm_crtc_funcs.destroy: tear down DRM bookkeeping and free the CRTC. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
5544
5545 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5546                                   struct drm_crtc_state *state)
5547 {
5548         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5549
5550         /* TODO Destroy dc_stream objects are stream object is flattened */
5551         if (cur->stream)
5552                 dc_stream_release(cur->stream);
5553
5554
5555         __drm_atomic_helper_crtc_destroy_state(state);
5556
5557
5558         kfree(state);
5559 }
5560
5561 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5562 {
5563         struct dm_crtc_state *state;
5564
5565         if (crtc->state)
5566                 dm_crtc_destroy_state(crtc, crtc->state);
5567
5568         state = kzalloc(sizeof(*state), GFP_KERNEL);
5569         if (WARN_ON(!state))
5570                 return;
5571
5572         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5573 }
5574
5575 static struct drm_crtc_state *
5576 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5577 {
5578         struct dm_crtc_state *state, *cur;
5579
5580         cur = to_dm_crtc_state(crtc->state);
5581
5582         if (WARN_ON(!crtc->state))
5583                 return NULL;
5584
5585         state = kzalloc(sizeof(*state), GFP_KERNEL);
5586         if (!state)
5587                 return NULL;
5588
5589         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5590
5591         if (cur->stream) {
5592                 state->stream = cur->stream;
5593                 dc_stream_retain(state->stream);
5594         }
5595
5596         state->active_planes = cur->active_planes;
5597         state->vrr_infopacket = cur->vrr_infopacket;
5598         state->abm_level = cur->abm_level;
5599         state->vrr_supported = cur->vrr_supported;
5600         state->freesync_config = cur->freesync_config;
5601         state->cm_has_degamma = cur->cm_has_degamma;
5602         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5603         /* TODO Duplicate dc_stream after objects are stream object is flattened */
5604
5605         return &state->base;
5606 }
5607
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
/* drm_crtc_funcs.late_register: expose per-CRTC debugfs entries. */
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif
5616
5617 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5618 {
5619         enum dc_irq_source irq_source;
5620         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5621         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5622         int rc;
5623
5624         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5625
5626         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5627
5628         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5629                          acrtc->crtc_id, enable ? "en" : "dis", rc);
5630         return rc;
5631 }
5632
/*
 * Enable or disable the VBLANK interrupt for @crtc, keeping the VUPDATE
 * interrupt state consistent with it (VUPDATE is only needed while VRR
 * is active). Returns 0 on success or a negative errno.
 */
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct amdgpu_display_manager *dm = &adev->dm;
	unsigned long flags;
#endif
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
		return -EBUSY;

	/* Skip the deferred work below while a GPU reset is in progress. */
	if (amdgpu_in_reset(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/*
	 * Hand the vblank transition off to the DM vblank workqueue under
	 * vblank_lock; the work item consumes dm/otg_inst/enable.
	 * NOTE(review): later requests overwrite earlier ones before the
	 * work runs — presumably only the latest state matters; confirm.
	 */
	spin_lock_irqsave(&dm->vblank_lock, flags);
	dm->vblank_workqueue->dm = dm;
	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
	dm->vblank_workqueue->enable = enable;
	spin_unlock_irqrestore(&dm->vblank_lock, flags);
	schedule_work(&dm->vblank_workqueue->mall_work);
#endif

	return 0;
}
5676
/* drm_crtc_funcs.enable_vblank: turn the vblank interrupt on. */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
5681
/* drm_crtc_funcs.disable_vblank: turn the vblank interrupt off. */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
5686
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	.late_register = amdgpu_dm_crtc_late_register,
#endif
};
5706
5707 static enum drm_connector_status
5708 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5709 {
5710         bool connected;
5711         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5712
5713         /*
5714          * Notes:
5715          * 1. This interface is NOT called in context of HPD irq.
5716          * 2. This interface *is called* in context of user-mode ioctl. Which
5717          * makes it a bad place for *any* MST-related activity.
5718          */
5719
5720         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5721             !aconnector->fake_enable)
5722                 connected = (aconnector->dc_sink != NULL);
5723         else
5724                 connected = (aconnector->base.force == DRM_FORCE_ON);
5725
5726         update_subconnector_property(aconnector);
5727
5728         return (connected ? connector_status_connected :
5729                         connector_status_disconnected);
5730 }
5731
5732 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5733                                             struct drm_connector_state *connector_state,
5734                                             struct drm_property *property,
5735                                             uint64_t val)
5736 {
5737         struct drm_device *dev = connector->dev;
5738         struct amdgpu_device *adev = drm_to_adev(dev);
5739         struct dm_connector_state *dm_old_state =
5740                 to_dm_connector_state(connector->state);
5741         struct dm_connector_state *dm_new_state =
5742                 to_dm_connector_state(connector_state);
5743
5744         int ret = -EINVAL;
5745
5746         if (property == dev->mode_config.scaling_mode_property) {
5747                 enum amdgpu_rmx_type rmx_type;
5748
5749                 switch (val) {
5750                 case DRM_MODE_SCALE_CENTER:
5751                         rmx_type = RMX_CENTER;
5752                         break;
5753                 case DRM_MODE_SCALE_ASPECT:
5754                         rmx_type = RMX_ASPECT;
5755                         break;
5756                 case DRM_MODE_SCALE_FULLSCREEN:
5757                         rmx_type = RMX_FULL;
5758                         break;
5759                 case DRM_MODE_SCALE_NONE:
5760                 default:
5761                         rmx_type = RMX_OFF;
5762                         break;
5763                 }
5764
5765                 if (dm_old_state->scaling == rmx_type)
5766                         return 0;
5767
5768                 dm_new_state->scaling = rmx_type;
5769                 ret = 0;
5770         } else if (property == adev->mode_info.underscan_hborder_property) {
5771                 dm_new_state->underscan_hborder = val;
5772                 ret = 0;
5773         } else if (property == adev->mode_info.underscan_vborder_property) {
5774                 dm_new_state->underscan_vborder = val;
5775                 ret = 0;
5776         } else if (property == adev->mode_info.underscan_property) {
5777                 dm_new_state->underscan_enable = val;
5778                 ret = 0;
5779         } else if (property == adev->mode_info.abm_level_property) {
5780                 dm_new_state->abm_level = val;
5781                 ret = 0;
5782         }
5783
5784         return ret;
5785 }
5786
5787 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5788                                             const struct drm_connector_state *state,
5789                                             struct drm_property *property,
5790                                             uint64_t *val)
5791 {
5792         struct drm_device *dev = connector->dev;
5793         struct amdgpu_device *adev = drm_to_adev(dev);
5794         struct dm_connector_state *dm_state =
5795                 to_dm_connector_state(state);
5796         int ret = -EINVAL;
5797
5798         if (property == dev->mode_config.scaling_mode_property) {
5799                 switch (dm_state->scaling) {
5800                 case RMX_CENTER:
5801                         *val = DRM_MODE_SCALE_CENTER;
5802                         break;
5803                 case RMX_ASPECT:
5804                         *val = DRM_MODE_SCALE_ASPECT;
5805                         break;
5806                 case RMX_FULL:
5807                         *val = DRM_MODE_SCALE_FULLSCREEN;
5808                         break;
5809                 case RMX_OFF:
5810                 default:
5811                         *val = DRM_MODE_SCALE_NONE;
5812                         break;
5813                 }
5814                 ret = 0;
5815         } else if (property == adev->mode_info.underscan_hborder_property) {
5816                 *val = dm_state->underscan_hborder;
5817                 ret = 0;
5818         } else if (property == adev->mode_info.underscan_vborder_property) {
5819                 *val = dm_state->underscan_vborder;
5820                 ret = 0;
5821         } else if (property == adev->mode_info.underscan_property) {
5822                 *val = dm_state->underscan_enable;
5823                 ret = 0;
5824         } else if (property == adev->mode_info.abm_level_property) {
5825                 *val = dm_state->abm_level;
5826                 ret = 0;
5827         }
5828
5829         return ret;
5830 }
5831
/* drm_connector_funcs.early_unregister: remove the DP AUX channel device. */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
5838
/*
 * drm_connector_funcs.destroy: release everything owned by the
 * connector — MST topology manager, backlight device, DC sinks, CEC,
 * i2c adapter — then free the connector itself.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* Only eDP/LVDS panels register a backlight device. */
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	/* Drop sink references; pointers are cleared to avoid dangling use. */
	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
5882
/*
 * drm_connector_funcs.reset: free the old connector software state and
 * install a fresh one with driver defaults.
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	/* NOTE(review): if connector->state is NULL, to_dm_connector_state()
	 * relies on the base state being the first member so that the kfree()
	 * below sees NULL — confirm against the struct layout.
	 */
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		/* Driver defaults: no scaling, no underscan, 8 bpc max. */
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		/* ABM (adaptive backlight) only applies to eDP panels. */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}
5909
5910 struct drm_connector_state *
5911 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5912 {
5913         struct dm_connector_state *state =
5914                 to_dm_connector_state(connector->state);
5915
5916         struct dm_connector_state *new_state =
5917                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5918
5919         if (!new_state)
5920                 return NULL;
5921
5922         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5923
5924         new_state->freesync_capable = state->freesync_capable;
5925         new_state->abm_level = state->abm_level;
5926         new_state->scaling = state->scaling;
5927         new_state->underscan_enable = state->underscan_enable;
5928         new_state->underscan_hborder = state->underscan_hborder;
5929         new_state->underscan_vborder = state->underscan_vborder;
5930         new_state->vcpi_slots = state->vcpi_slots;
5931         new_state->pbn = state->pbn;
5932         return &new_state->base;
5933 }
5934
5935 static int
5936 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5937 {
5938         struct amdgpu_dm_connector *amdgpu_dm_connector =
5939                 to_amdgpu_dm_connector(connector);
5940         int r;
5941
5942         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5943             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5944                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5945                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5946                 if (r)
5947                         return r;
5948         }
5949
5950 #if defined(CONFIG_DEBUG_FS)
5951         connector_debugfs_init(amdgpu_dm_connector);
5952 #endif
5953
5954         return 0;
5955 }
5956
/* Connector function table shared by all amdgpu_dm connectors. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
5969
/* drm_connector_helper .get_modes thin wrapper. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
5974
/*
 * Create an emulated (virtual) sink from the connector's EDID blob, used
 * when the connector status is user-forced and no physical sink exists.
 * Forces the connector OFF if no EDID blob is available.
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
				aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	/* Register the EDID with DC as a remote (emulated) sink. */
	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	/*
	 * When forced ON, prefer a real local sink if the link has one,
	 * otherwise fall back to the emulated sink just created.
	 */
	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}
6009
/*
 * Set up EDID management for a user-forced connector: seed plausible DP
 * link capabilities and create the emulated sink from the EDID blob.
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
6027
/*
 * Create a stream for @aconnector and validate it against DC, retrying
 * with progressively lower bpc (down to 6) if validation fails.
 *
 * Returns a validated stream owned by the caller, or NULL if no bpc
 * value produced a stream DC accepts.
 */
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	/* Start from the user-requested bpc cap, or the 8 bpc default. */
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			/* Release the rejected stream before retrying. */
			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	return stream;
}
6069
/*
 * drm_connector_helper .mode_valid: accept @mode only if DC can create
 * and validate a stream for it. Interlaced and doublescan modes are
 * rejected outright.
 */
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initilialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
		!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	/* A forced-on connector may validly have no sink (emulated path). */
	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
				aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	/* Validation by trial: a releasable stream means the mode is OK. */
	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling*/
	return result;
}
6109
/*
 * Build a DC HDR static-metadata info packet from the connector state's
 * hdr_output_metadata blob.
 *
 * Returns 0 on success (out->valid is set only when metadata is present),
 * a negative errno from the infoframe helpers on packing failure, or
 * -EINVAL for an unexpected frame length or connector type.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	/* No HDR metadata on the connector: leave the packet zeroed/invalid. */
	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	/* Copy the 26-byte payload after the per-connector header bytes. */
	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
6168
6169 static bool
6170 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6171                           const struct drm_connector_state *new_state)
6172 {
6173         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6174         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6175
6176         if (old_blob != new_blob) {
6177                 if (old_blob && new_blob &&
6178                     old_blob->length == new_blob->length)
6179                         return memcmp(old_blob->data, new_blob->data,
6180                                       old_blob->length);
6181
6182                 return true;
6183         }
6184
6185         return false;
6186 }
6187
6188 static int
6189 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6190                                  struct drm_atomic_state *state)
6191 {
6192         struct drm_connector_state *new_con_state =
6193                 drm_atomic_get_new_connector_state(state, conn);
6194         struct drm_connector_state *old_con_state =
6195                 drm_atomic_get_old_connector_state(state, conn);
6196         struct drm_crtc *crtc = new_con_state->crtc;
6197         struct drm_crtc_state *new_crtc_state;
6198         int ret;
6199
6200         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6201
6202         if (!crtc)
6203                 return 0;
6204
6205         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6206                 struct dc_info_packet hdr_infopacket;
6207
6208                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6209                 if (ret)
6210                         return ret;
6211
6212                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6213                 if (IS_ERR(new_crtc_state))
6214                         return PTR_ERR(new_crtc_state);
6215
6216                 /*
6217                  * DC considers the stream backends changed if the
6218                  * static metadata changes. Forcing the modeset also
6219                  * gives a simple way for userspace to switch from
6220                  * 8bpc to 10bpc when setting the metadata to enter
6221                  * or exit HDR.
6222                  *
6223                  * Changing the static metadata after it's been
6224                  * set is permissible, however. So only force a
6225                  * modeset if we're entering or exiting HDR.
6226                  */
6227                 new_crtc_state->mode_changed =
6228                         !old_con_state->hdr_output_metadata ||
6229                         !new_con_state->hdr_output_metadata;
6230         }
6231
6232         return 0;
6233 }
6234
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, larger display is hotplugged while in fbcon mode, its
	 * higher-resolution modes are filtered out by drm_mode_validate_size()
	 * and would be missing once the user reaches the display manager
	 * (e.g. lightdm). The get_modes callback therefore rebuilds the mode
	 * list rather than merely returning the current mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
6247
/* Intentionally empty: CRTC disable is handled through the atomic commit path. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
6251
6252 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6253 {
6254         struct drm_atomic_state *state = new_crtc_state->state;
6255         struct drm_plane *plane;
6256         int num_active = 0;
6257
6258         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6259                 struct drm_plane_state *new_plane_state;
6260
6261                 /* Cursor planes are "fake". */
6262                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6263                         continue;
6264
6265                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6266
6267                 if (!new_plane_state) {
6268                         /*
6269                          * The plane is enable on the CRTC and hasn't changed
6270                          * state. This means that it previously passed
6271                          * validation and is therefore enabled.
6272                          */
6273                         num_active += 1;
6274                         continue;
6275                 }
6276
6277                 /* We need a framebuffer to be considered enabled. */
6278                 num_active += (new_plane_state->fb != NULL);
6279         }
6280
6281         return num_active;
6282 }
6283
6284 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6285                                          struct drm_crtc_state *new_crtc_state)
6286 {
6287         struct dm_crtc_state *dm_new_crtc_state =
6288                 to_dm_crtc_state(new_crtc_state);
6289
6290         dm_new_crtc_state->active_planes = 0;
6291
6292         if (!dm_new_crtc_state->stream)
6293                 return;
6294
6295         dm_new_crtc_state->active_planes =
6296                 count_crtc_active_planes(new_crtc_state);
6297 }
6298
/*
 * CRTC-level atomic check: refresh the active-plane count, require the
 * primary plane whenever the CRTC is enabled, and validate the attached
 * DC stream.
 *
 * Returns 0 on success, -EINVAL when validation fails.
 */
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	dm_update_crtc_active_planes(crtc, crtc_state);

	/* A required modeset without a stream to validate should never happen. */
	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
		return -EINVAL;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
	return ret;
}
6341
/*
 * Accept every mode unchanged; actual validation happens in
 * dm_crtc_helper_atomic_check() via dc_validate_stream().
 */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
6348
/* CRTC helper vtable wired into DRM's atomic helpers. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
6355
/* Intentionally empty: encoder teardown is handled elsewhere in the DM. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
6360
6361 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6362 {
6363         switch (display_color_depth) {
6364                 case COLOR_DEPTH_666:
6365                         return 6;
6366                 case COLOR_DEPTH_888:
6367                         return 8;
6368                 case COLOR_DEPTH_101010:
6369                         return 10;
6370                 case COLOR_DEPTH_121212:
6371                         return 12;
6372                 case COLOR_DEPTH_141414:
6373                         return 14;
6374                 case COLOR_DEPTH_161616:
6375                         return 16;
6376                 default:
6377                         break;
6378                 }
6379         return 0;
6380 }
6381
/*
 * Encoder atomic check for DP MST connectors: compute the PBN required by
 * the adjusted mode at the negotiated color depth and reserve VCPI slots
 * on the MST topology.
 *
 * Returns 0 for non-MST connectors or when nothing changed; otherwise 0 on
 * success or the negative error from the VCPI allocation.
 */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	/* Only MST connectors with a sink need VCPI bookkeeping. */
	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	/*
	 * Recompute the PBN only for genuinely new state; duplicated state
	 * (e.g. suspend/resume) keeps the previously computed value.
	 */
	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		/* 3 components per pixel. */
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
6428
/* Encoder helper vtable wired into DRM's atomic helpers. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
6433
6434 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For every MST connector in the atomic state, enable or disable DSC on its
 * MST port according to the matching DC stream, and update the connector's
 * cached PBN/VCPI values when DSC is on.
 *
 * Returns 0 on success or a negative error from the MST DSC enable call.
 */
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		/* Only MST connectors carry a port. */
		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		/* Find the DC stream whose context points back at this connector. */
		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		/* Stream doesn't use DSC: make sure it is disabled on the port. */
		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		/* DSC in use: recompute PBN from the compressed bpp and clock. */
		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
6497 #endif
6498
6499 static void dm_drm_plane_reset(struct drm_plane *plane)
6500 {
6501         struct dm_plane_state *amdgpu_state = NULL;
6502
6503         if (plane->state)
6504                 plane->funcs->atomic_destroy_state(plane, plane->state);
6505
6506         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6507         WARN_ON(amdgpu_state == NULL);
6508
6509         if (amdgpu_state)
6510                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6511 }
6512
6513 static struct drm_plane_state *
6514 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6515 {
6516         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6517
6518         old_dm_plane_state = to_dm_plane_state(plane->state);
6519         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6520         if (!dm_plane_state)
6521                 return NULL;
6522
6523         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6524
6525         if (old_dm_plane_state->dc_state) {
6526                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6527                 dc_plane_state_retain(dm_plane_state->dc_state);
6528         }
6529
6530         return &dm_plane_state->base;
6531 }
6532
6533 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6534                                 struct drm_plane_state *state)
6535 {
6536         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6537
6538         if (dm_plane_state->dc_state)
6539                 dc_plane_state_release(dm_plane_state->dc_state);
6540
6541         drm_atomic_helper_plane_destroy_state(plane, state);
6542 }
6543
/* Plane vtable: atomic helpers plus DM-specific state lifecycle hooks. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane   = drm_atomic_helper_update_plane,
	.disable_plane  = drm_atomic_helper_disable_plane,
	.destroy        = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
	.format_mod_supported = dm_plane_format_mod_supported,
};
6553
/*
 * Prepare the framebuffer BO for scanout: reserve it via TTM, pin it in a
 * suitable domain, bind it in GART, record its GPU address, and take a BO
 * reference that dm_plane_helper_cleanup_fb() later drops. For newly
 * created planes, also fill the DC buffer attributes from the address.
 *
 * Returns 0 on success or a negative errno from reservation/pin/bind.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	/* Disabled plane: nothing to pin. */
	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	/* Reserve the BO before pinning; backoff on every error path below. */
	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	/* Cursors must live in VRAM; other planes may use any supported domain. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		/* -ERESTARTSYS is an expected interruption, not worth a log. */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	/* Reference dropped in dm_plane_helper_cleanup_fb(). */
	amdgpu_bo_ref(rbo);

	/**
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;
}
6643
/*
 * Undo dm_plane_helper_prepare_fb(): unpin the framebuffer BO and drop the
 * reference taken there. The BO must be reserved around the unpin.
 */
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	/* Nothing was pinned if no framebuffer was bound. */
	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		/* Can't safely unpin without the reservation; leak the pin. */
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}
6664
/*
 * Validate a plane state against its CRTC: clamp the visible viewport for
 * non-cursor planes and check scaling against the DC plane caps, then defer
 * to the generic DRM plane-state check.
 *
 * Returns 0 if the state is acceptable, -EINVAL otherwise.
 */
static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	struct drm_framebuffer *fb = state->fb;
	int min_downscale, max_upscale;
	int min_scale = 0;
	int max_scale = INT_MAX;

	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
	if (fb && state->crtc) {
		/* Validate viewport to cover the case when only the position changes */
		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
			int viewport_width = state->crtc_w;
			int viewport_height = state->crtc_h;

			/* Shrink the viewport by whatever hangs off-screen. */
			if (state->crtc_x < 0)
				viewport_width += state->crtc_x;
			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

			if (state->crtc_y < 0)
				viewport_height += state->crtc_y;
			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

			if (viewport_width < 0 || viewport_height < 0) {
				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
				return -EINVAL;
			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
				return -EINVAL;
			}

		}

		/* Get min/max allowed scaling factors from plane caps. */
		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
					     &min_downscale, &max_upscale);
		/*
		 * Convert to drm convention: 16.16 fixed point, instead of dc's
		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
		 */
		min_scale = (1000 << 16) / max_upscale;
		max_scale = (1000 << 16) / min_downscale;
	}

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, min_scale, max_scale, true, true);
}
6718
6719 static int dm_plane_atomic_check(struct drm_plane *plane,
6720                                  struct drm_plane_state *state)
6721 {
6722         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6723         struct dc *dc = adev->dm.dc;
6724         struct dm_plane_state *dm_plane_state;
6725         struct dc_scaling_info scaling_info;
6726         struct drm_crtc_state *new_crtc_state;
6727         int ret;
6728
6729         trace_amdgpu_dm_plane_atomic_check(state);
6730
6731         dm_plane_state = to_dm_plane_state(state);
6732
6733         if (!dm_plane_state->dc_state)
6734                 return 0;
6735
6736         new_crtc_state =
6737                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6738         if (!new_crtc_state)
6739                 return -EINVAL;
6740
6741         ret = dm_plane_helper_check_state(state, new_crtc_state);
6742         if (ret)
6743                 return ret;
6744
6745         ret = fill_dc_scaling_info(state, &scaling_info);
6746         if (ret)
6747                 return ret;
6748
6749         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6750                 return 0;
6751
6752         return -EINVAL;
6753 }
6754
6755 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6756                                        struct drm_plane_state *new_plane_state)
6757 {
6758         /* Only support async updates on cursor planes. */
6759         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6760                 return -EINVAL;
6761
6762         return 0;
6763 }
6764
/*
 * Apply an async cursor update: copy the new framebuffer and src/crtc
 * rectangles into the committed plane state, then program the cursor.
 */
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	/* Swap FBs so reference accounting follows the committed state. */
	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}
6786
/* Plane helper vtable: FB pin/unpin plus sync and async atomic checks. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
6794
6795 /*
6796  * TODO: these are currently initialized to rgb formats only.
6797  * For future use cases we should either initialize them dynamically based on
6798  * plane capabilities, or initialize this array to all formats, so internal drm
6799  * check will succeed, and let DC implement proper check
6800  */
/* Base format table for primary planes (see get_plane_formats()). */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

/* Formats advertised on overlay planes. */
static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

/* Cursor planes advertise a single format. */
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
6826
6827 static int get_plane_formats(const struct drm_plane *plane,
6828                              const struct dc_plane_cap *plane_cap,
6829                              uint32_t *formats, int max_formats)
6830 {
6831         int i, num_formats = 0;
6832
6833         /*
6834          * TODO: Query support for each group of formats directly from
6835          * DC plane caps. This will require adding more formats to the
6836          * caps list.
6837          */
6838
6839         switch (plane->type) {
6840         case DRM_PLANE_TYPE_PRIMARY:
6841                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6842                         if (num_formats >= max_formats)
6843                                 break;
6844
6845                         formats[num_formats++] = rgb_formats[i];
6846                 }
6847
6848                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6849                         formats[num_formats++] = DRM_FORMAT_NV12;
6850                 if (plane_cap && plane_cap->pixel_format_support.p010)
6851                         formats[num_formats++] = DRM_FORMAT_P010;
6852                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6853                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6854                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6855                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6856                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6857                 }
6858                 break;
6859
6860         case DRM_PLANE_TYPE_OVERLAY:
6861                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6862                         if (num_formats >= max_formats)
6863                                 break;
6864
6865                         formats[num_formats++] = overlay_formats[i];
6866                 }
6867                 break;
6868
6869         case DRM_PLANE_TYPE_CURSOR:
6870                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6871                         if (num_formats >= max_formats)
6872                                 break;
6873
6874                         formats[num_formats++] = cursor_formats[i];
6875                 }
6876                 break;
6877         }
6878
6879         return num_formats;
6880 }
6881
6882 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6883                                 struct drm_plane *plane,
6884                                 unsigned long possible_crtcs,
6885                                 const struct dc_plane_cap *plane_cap)
6886 {
6887         uint32_t formats[32];
6888         int num_formats;
6889         int res = -EPERM;
6890         unsigned int supported_rotations;
6891         uint64_t *modifiers = NULL;
6892
6893         num_formats = get_plane_formats(plane, plane_cap, formats,
6894                                         ARRAY_SIZE(formats));
6895
6896         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6897         if (res)
6898                 return res;
6899
6900         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6901                                        &dm_plane_funcs, formats, num_formats,
6902                                        modifiers, plane->type, NULL);
6903         kfree(modifiers);
6904         if (res)
6905                 return res;
6906
6907         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6908             plane_cap && plane_cap->per_pixel_alpha) {
6909                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6910                                           BIT(DRM_MODE_BLEND_PREMULTI);
6911
6912                 drm_plane_create_alpha_property(plane);
6913                 drm_plane_create_blend_mode_property(plane, blend_caps);
6914         }
6915
6916         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6917             plane_cap &&
6918             (plane_cap->pixel_format_support.nv12 ||
6919              plane_cap->pixel_format_support.p010)) {
6920                 /* This only affects YUV formats. */
6921                 drm_plane_create_color_properties(
6922                         plane,
6923                         BIT(DRM_COLOR_YCBCR_BT601) |
6924                         BIT(DRM_COLOR_YCBCR_BT709) |
6925                         BIT(DRM_COLOR_YCBCR_BT2020),
6926                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6927                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6928                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6929         }
6930
6931         supported_rotations =
6932                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6933                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6934
6935         if (dm->adev->asic_type >= CHIP_BONAIRE &&
6936             plane->type != DRM_PLANE_TYPE_CURSOR)
6937                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6938                                                    supported_rotations);
6939
6940         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6941
6942         /* Create (reset) the plane state */
6943         if (plane->funcs->reset)
6944                 plane->funcs->reset(plane);
6945
6946         return 0;
6947 }
6948
6949 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6950                                struct drm_plane *plane,
6951                                uint32_t crtc_index)
6952 {
6953         struct amdgpu_crtc *acrtc = NULL;
6954         struct drm_plane *cursor_plane;
6955
6956         int res = -ENOMEM;
6957
6958         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6959         if (!cursor_plane)
6960                 goto fail;
6961
6962         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6963         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6964
6965         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6966         if (!acrtc)
6967                 goto fail;
6968
6969         res = drm_crtc_init_with_planes(
6970                         dm->ddev,
6971                         &acrtc->base,
6972                         plane,
6973                         cursor_plane,
6974                         &amdgpu_dm_crtc_funcs, NULL);
6975
6976         if (res)
6977                 goto fail;
6978
6979         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6980
6981         /* Create (reset) the plane state */
6982         if (acrtc->base.funcs->reset)
6983                 acrtc->base.funcs->reset(&acrtc->base);
6984
6985         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6986         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6987
6988         acrtc->crtc_id = crtc_index;
6989         acrtc->base.enabled = false;
6990         acrtc->otg_inst = -1;
6991
6992         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6993         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6994                                    true, MAX_COLOR_LUT_ENTRIES);
6995         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6996
6997         return 0;
6998
6999 fail:
7000         kfree(acrtc);
7001         kfree(cursor_plane);
7002         return res;
7003 }
7004
7005
7006 static int to_drm_connector_type(enum signal_type st)
7007 {
7008         switch (st) {
7009         case SIGNAL_TYPE_HDMI_TYPE_A:
7010                 return DRM_MODE_CONNECTOR_HDMIA;
7011         case SIGNAL_TYPE_EDP:
7012                 return DRM_MODE_CONNECTOR_eDP;
7013         case SIGNAL_TYPE_LVDS:
7014                 return DRM_MODE_CONNECTOR_LVDS;
7015         case SIGNAL_TYPE_RGB:
7016                 return DRM_MODE_CONNECTOR_VGA;
7017         case SIGNAL_TYPE_DISPLAY_PORT:
7018         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7019                 return DRM_MODE_CONNECTOR_DisplayPort;
7020         case SIGNAL_TYPE_DVI_DUAL_LINK:
7021         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7022                 return DRM_MODE_CONNECTOR_DVID;
7023         case SIGNAL_TYPE_VIRTUAL:
7024                 return DRM_MODE_CONNECTOR_VIRTUAL;
7025
7026         default:
7027                 return DRM_MODE_CONNECTOR_Unknown;
7028         }
7029 }
7030
7031 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7032 {
7033         struct drm_encoder *encoder;
7034
7035         /* There is only one encoder per connector */
7036         drm_connector_for_each_possible_encoder(connector, encoder)
7037                 return encoder;
7038
7039         return NULL;
7040 }
7041
/*
 * Cache the connector's native (preferred) mode on its encoder.
 *
 * Clears amdgpu_encoder->native_mode.clock first so a stale mode is never
 * kept when no preferred mode is found, then copies the preferred entry
 * from the connector's probed_modes list.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;

        encoder = amdgpu_dm_connector_to_encoder(connector);

        if (encoder == NULL)
                return;

        amdgpu_encoder = to_amdgpu_encoder(encoder);

        /* Invalidate any previously cached native mode. */
        amdgpu_encoder->native_mode.clock = 0;

        if (!list_empty(&connector->probed_modes)) {
                struct drm_display_mode *preferred_mode = NULL;

                list_for_each_entry(preferred_mode,
                                    &connector->probed_modes,
                                    head) {
                        if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
                                amdgpu_encoder->native_mode = *preferred_mode;

                        /*
                         * NOTE(review): this break is unconditional, so only
                         * the first list entry is ever examined — relies on
                         * the caller having run drm_mode_sort() so the best
                         * preferred mode sorts first.
                         */
                        break;
                }

        }
}
7070
7071 static struct drm_display_mode *
7072 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7073                              char *name,
7074                              int hdisplay, int vdisplay)
7075 {
7076         struct drm_device *dev = encoder->dev;
7077         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7078         struct drm_display_mode *mode = NULL;
7079         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7080
7081         mode = drm_mode_duplicate(dev, native_mode);
7082
7083         if (mode == NULL)
7084                 return NULL;
7085
7086         mode->hdisplay = hdisplay;
7087         mode->vdisplay = vdisplay;
7088         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7089         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7090
7091         return mode;
7092
7093 }
7094
7095 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7096                                                  struct drm_connector *connector)
7097 {
7098         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7099         struct drm_display_mode *mode = NULL;
7100         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7101         struct amdgpu_dm_connector *amdgpu_dm_connector =
7102                                 to_amdgpu_dm_connector(connector);
7103         int i;
7104         int n;
7105         struct mode_size {
7106                 char name[DRM_DISPLAY_MODE_LEN];
7107                 int w;
7108                 int h;
7109         } common_modes[] = {
7110                 {  "640x480",  640,  480},
7111                 {  "800x600",  800,  600},
7112                 { "1024x768", 1024,  768},
7113                 { "1280x720", 1280,  720},
7114                 { "1280x800", 1280,  800},
7115                 {"1280x1024", 1280, 1024},
7116                 { "1440x900", 1440,  900},
7117                 {"1680x1050", 1680, 1050},
7118                 {"1600x1200", 1600, 1200},
7119                 {"1920x1080", 1920, 1080},
7120                 {"1920x1200", 1920, 1200}
7121         };
7122
7123         n = ARRAY_SIZE(common_modes);
7124
7125         for (i = 0; i < n; i++) {
7126                 struct drm_display_mode *curmode = NULL;
7127                 bool mode_existed = false;
7128
7129                 if (common_modes[i].w > native_mode->hdisplay ||
7130                     common_modes[i].h > native_mode->vdisplay ||
7131                    (common_modes[i].w == native_mode->hdisplay &&
7132                     common_modes[i].h == native_mode->vdisplay))
7133                         continue;
7134
7135                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7136                         if (common_modes[i].w == curmode->hdisplay &&
7137                             common_modes[i].h == curmode->vdisplay) {
7138                                 mode_existed = true;
7139                                 break;
7140                         }
7141                 }
7142
7143                 if (mode_existed)
7144                         continue;
7145
7146                 mode = amdgpu_dm_create_common_mode(encoder,
7147                                 common_modes[i].name, common_modes[i].w,
7148                                 common_modes[i].h);
7149                 drm_mode_probed_add(connector, mode);
7150                 amdgpu_dm_connector->num_modes++;
7151         }
7152 }
7153
7154 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7155                                               struct edid *edid)
7156 {
7157         struct amdgpu_dm_connector *amdgpu_dm_connector =
7158                         to_amdgpu_dm_connector(connector);
7159
7160         if (edid) {
7161                 /* empty probed_modes */
7162                 INIT_LIST_HEAD(&connector->probed_modes);
7163                 amdgpu_dm_connector->num_modes =
7164                                 drm_add_edid_modes(connector, edid);
7165
7166                 /* sorting the probed modes before calling function
7167                  * amdgpu_dm_get_native_mode() since EDID can have
7168                  * more than one preferred mode. The modes that are
7169                  * later in the probed mode list could be of higher
7170                  * and preferred resolution. For example, 3840x2160
7171                  * resolution in base EDID preferred timing and 4096x2160
7172                  * preferred resolution in DID extension block later.
7173                  */
7174                 drm_mode_sort(&connector->probed_modes);
7175                 amdgpu_dm_get_native_mode(connector);
7176
7177                 /* Freesync capabilities are reset by calling
7178                  * drm_add_edid_modes() and need to be
7179                  * restored here.
7180                  */
7181                 amdgpu_dm_update_freesync_caps(connector, edid);
7182         } else {
7183                 amdgpu_dm_connector->num_modes = 0;
7184         }
7185 }
7186
7187 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7188                               struct drm_display_mode *mode)
7189 {
7190         struct drm_display_mode *m;
7191
7192         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7193                 if (drm_mode_equal(m, mode))
7194                         return true;
7195         }
7196
7197         return false;
7198 }
7199
/*
 * Synthesize fixed-refresh "freesync video" modes for @aconnector.
 *
 * Starting from the highest-refresh probed mode, create one variant per
 * common_rates[] entry that fits inside the panel's VRR window, by
 * stretching vtotal at a constant pixel clock so switching between them
 * needs no full modeset.
 *
 * Returns the number of modes added to the probed list.
 */
static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
        const struct drm_display_mode *m;
        struct drm_display_mode *new_mode;
        uint i;
        uint32_t new_modes_count = 0;

        /* Standard FPS values
         *
         * 23.976   - TV/NTSC
         * 24       - Cinema
         * 25       - TV/PAL
         * 29.97    - TV/NTSC
         * 30       - TV/NTSC
         * 48       - Cinema HFR
         * 50       - TV/PAL
         * 60       - Commonly used
         * 48,72,96 - Multiples of 24
         */
        const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
                                         48000, 50000, 60000, 72000, 96000 };

        /*
         * Find mode with highest refresh rate with the same resolution
         * as the preferred mode. Some monitors report a preferred mode
         * with lower resolution than the highest refresh rate supported.
         */

        m = get_highest_refresh_rate_mode(aconnector, true);
        if (!m)
                return 0;

        for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
                uint64_t target_vtotal, target_vtotal_diff;
                uint64_t num, den;

                /* Never synthesize a rate above the base mode's refresh. */
                if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
                        continue;

                /* Stay inside the panel's reported VRR range (Hz * 1000). */
                if (common_rates[i] < aconnector->min_vfreq * 1000 ||
                    common_rates[i] > aconnector->max_vfreq * 1000)
                        continue;

                /* vtotal needed for the target rate at the same pixel clock:
                 * rate = clock / (htotal * vtotal). */
                num = (unsigned long long)m->clock * 1000 * 1000;
                den = common_rates[i] * (unsigned long long)m->htotal;
                target_vtotal = div_u64(num, den);
                target_vtotal_diff = target_vtotal - m->vtotal;

                /* Check for illegal modes */
                if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
                    m->vsync_end + target_vtotal_diff < m->vsync_start ||
                    m->vtotal + target_vtotal_diff < m->vsync_end)
                        continue;

                new_mode = drm_mode_duplicate(aconnector->base.dev, m);
                if (!new_mode)
                        goto out;

                /* Grow the vertical blank; everything before vsync keeps
                 * its position. */
                new_mode->vtotal += (u16)target_vtotal_diff;
                new_mode->vsync_start += (u16)target_vtotal_diff;
                new_mode->vsync_end += (u16)target_vtotal_diff;
                new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
                new_mode->type |= DRM_MODE_TYPE_DRIVER;

                if (!is_duplicate_mode(aconnector, new_mode)) {
                        drm_mode_probed_add(&aconnector->base, new_mode);
                        new_modes_count += 1;
                } else
                        drm_mode_destroy(aconnector->base.dev, new_mode);
        }
 out:
        return new_modes_count;
}
7273
7274 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7275                                                    struct edid *edid)
7276 {
7277         struct amdgpu_dm_connector *amdgpu_dm_connector =
7278                 to_amdgpu_dm_connector(connector);
7279
7280         if (!(amdgpu_freesync_vid_mode && edid))
7281                 return;
7282         
7283         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7284                 amdgpu_dm_connector->num_modes +=
7285                         add_fs_modes(amdgpu_dm_connector);
7286 }
7287
7288 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7289 {
7290         struct amdgpu_dm_connector *amdgpu_dm_connector =
7291                         to_amdgpu_dm_connector(connector);
7292         struct drm_encoder *encoder;
7293         struct edid *edid = amdgpu_dm_connector->edid;
7294
7295         encoder = amdgpu_dm_connector_to_encoder(connector);
7296
7297         if (!drm_edid_is_valid(edid)) {
7298                 amdgpu_dm_connector->num_modes =
7299                                 drm_add_modes_noedid(connector, 640, 480);
7300         } else {
7301                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7302                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7303                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7304         }
7305         amdgpu_dm_fbc_init(connector);
7306
7307         return amdgpu_dm_connector->num_modes;
7308 }
7309
/*
 * Common initialization for an amdgpu_dm connector: default state, HPD
 * polling mode, and the DRM properties it exposes (scaling, underscan,
 * max bpc, ABM, HDR metadata, VRR capability, content protection).
 *
 * @dm:             display manager this connector belongs to
 * @aconnector:     connector to set up (drm_connector_init already done)
 * @connector_type: DRM_MODE_CONNECTOR_* value for this link
 * @link:           DC link backing the connector
 * @link_index:     index of @link; reused as the connector id
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
                                     struct amdgpu_dm_connector *aconnector,
                                     int connector_type,
                                     struct dc_link *link,
                                     int link_index)
{
        struct amdgpu_device *adev = drm_to_adev(dm->ddev);

        /*
         * Some of the properties below require access to state, like bpc.
         * Allocate some default initial connector state with our reset helper.
         */
        if (aconnector->base.funcs->reset)
                aconnector->base.funcs->reset(&aconnector->base);

        aconnector->connector_id = link_index;
        aconnector->dc_link = link;
        aconnector->base.interlace_allowed = false;
        aconnector->base.doublescan_allowed = false;
        aconnector->base.stereo_allowed = false;
        aconnector->base.dpms = DRM_MODE_DPMS_OFF;
        aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
        aconnector->audio_inst = -1;
        mutex_init(&aconnector->hpd_lock);

        /*
         * configure support HPD hot plug connector_>polled default value is 0
         * which means HPD hot plug not supported
         */
        switch (connector_type) {
        case DRM_MODE_CONNECTOR_HDMIA:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
                break;
        case DRM_MODE_CONNECTOR_DisplayPort:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.dp_ycbcr420_supported ? true : false;
                break;
        case DRM_MODE_CONNECTOR_DVID:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                break;
        default:
                break;
        }

        drm_object_attach_property(&aconnector->base.base,
                                dm->ddev->mode_config.scaling_mode_property,
                                DRM_MODE_SCALE_NONE);

        /* Underscan defaults to off with zero-width borders. */
        drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.underscan_property,
                                UNDERSCAN_OFF);
        drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.underscan_hborder_property,
                                0);
        drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.underscan_vborder_property,
                                0);

        /* MST ports inherit max_bpc from their root connector instead. */
        if (!aconnector->mst_port)
                drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

        /* This defaults to the max in the range, but we want 8bpc for non-edp. */
        aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
        aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

        /* ABM (backlight power saving) needs DMCU firmware or a DMUB server. */
        if (connector_type == DRM_MODE_CONNECTOR_eDP &&
            (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
                drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.abm_level_property, 0);
        }

        if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
            connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector_type == DRM_MODE_CONNECTOR_eDP) {
                drm_object_attach_property(
                        &aconnector->base.base,
                        dm->ddev->mode_config.hdr_output_metadata_property, 0);

                if (!aconnector->mst_port)
                        drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
                if (adev->dm.hdcp_workqueue)
                        drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
        }
}
7400
7401 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7402                               struct i2c_msg *msgs, int num)
7403 {
7404         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7405         struct ddc_service *ddc_service = i2c->ddc_service;
7406         struct i2c_command cmd;
7407         int i;
7408         int result = -EIO;
7409
7410         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7411
7412         if (!cmd.payloads)
7413                 return result;
7414
7415         cmd.number_of_payloads = num;
7416         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7417         cmd.speed = 100;
7418
7419         for (i = 0; i < num; i++) {
7420                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7421                 cmd.payloads[i].address = msgs[i].addr;
7422                 cmd.payloads[i].length = msgs[i].len;
7423                 cmd.payloads[i].data = msgs[i].buf;
7424         }
7425
7426         if (dc_submit_i2c(
7427                         ddc_service->ctx->dc,
7428                         ddc_service->ddc_pin->hw_info.ddc_channel,
7429                         &cmd))
7430                 result = num;
7431
7432         kfree(cmd.payloads);
7433         return result;
7434 }
7435
/* i2c_algorithm.functionality callback: plain I2C plus emulated SMBus. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
7440
/* Algorithm table for the DC-backed DDC i2c adapters created by create_i2c(). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
        .master_xfer = amdgpu_dm_i2c_xfer,
        .functionality = amdgpu_dm_i2c_func,
};
7445
7446 static struct amdgpu_i2c_adapter *
7447 create_i2c(struct ddc_service *ddc_service,
7448            int link_index,
7449            int *res)
7450 {
7451         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7452         struct amdgpu_i2c_adapter *i2c;
7453
7454         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7455         if (!i2c)
7456                 return NULL;
7457         i2c->base.owner = THIS_MODULE;
7458         i2c->base.class = I2C_CLASS_DDC;
7459         i2c->base.dev.parent = &adev->pdev->dev;
7460         i2c->base.algo = &amdgpu_dm_i2c_algo;
7461         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7462         i2c_set_adapdata(&i2c->base, i2c);
7463         i2c->ddc_service = ddc_service;
7464         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7465
7466         return i2c;
7467 }
7468
7469
7470 /*
7471  * Note: this function assumes that dc_link_detect() was called for the
7472  * dc_link which will be represented by this aconnector.
7473  */
7474 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7475                                     struct amdgpu_dm_connector *aconnector,
7476                                     uint32_t link_index,
7477                                     struct amdgpu_encoder *aencoder)
7478 {
7479         int res = 0;
7480         int connector_type;
7481         struct dc *dc = dm->dc;
7482         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7483         struct amdgpu_i2c_adapter *i2c;
7484
7485         link->priv = aconnector;
7486
7487         DRM_DEBUG_DRIVER("%s()\n", __func__);
7488
7489         i2c = create_i2c(link->ddc, link->link_index, &res);
7490         if (!i2c) {
7491                 DRM_ERROR("Failed to create i2c adapter data\n");
7492                 return -ENOMEM;
7493         }
7494
7495         aconnector->i2c = i2c;
7496         res = i2c_add_adapter(&i2c->base);
7497
7498         if (res) {
7499                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7500                 goto out_free;
7501         }
7502
7503         connector_type = to_drm_connector_type(link->connector_signal);
7504
7505         res = drm_connector_init_with_ddc(
7506                         dm->ddev,
7507                         &aconnector->base,
7508                         &amdgpu_dm_connector_funcs,
7509                         connector_type,
7510                         &i2c->base);
7511
7512         if (res) {
7513                 DRM_ERROR("connector_init failed\n");
7514                 aconnector->connector_id = -1;
7515                 goto out_free;
7516         }
7517
7518         drm_connector_helper_add(
7519                         &aconnector->base,
7520                         &amdgpu_dm_connector_helper_funcs);
7521
7522         amdgpu_dm_connector_init_helper(
7523                 dm,
7524                 aconnector,
7525                 connector_type,
7526                 link,
7527                 link_index);
7528
7529         drm_connector_attach_encoder(
7530                 &aconnector->base, &aencoder->base);
7531
7532         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7533                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7534                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7535
7536 out_free:
7537         if (res) {
7538                 kfree(i2c);
7539                 aconnector->i2c = NULL;
7540         }
7541         return res;
7542 }
7543
7544 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7545 {
7546         switch (adev->mode_info.num_crtc) {
7547         case 1:
7548                 return 0x1;
7549         case 2:
7550                 return 0x3;
7551         case 3:
7552                 return 0x7;
7553         case 4:
7554                 return 0xf;
7555         case 5:
7556                 return 0x1f;
7557         case 6:
7558         default:
7559                 return 0x3f;
7560         }
7561 }
7562
7563 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7564                                   struct amdgpu_encoder *aencoder,
7565                                   uint32_t link_index)
7566 {
7567         struct amdgpu_device *adev = drm_to_adev(dev);
7568
7569         int res = drm_encoder_init(dev,
7570                                    &aencoder->base,
7571                                    &amdgpu_dm_encoder_funcs,
7572                                    DRM_MODE_ENCODER_TMDS,
7573                                    NULL);
7574
7575         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7576
7577         if (!res)
7578                 aencoder->encoder_id = link_index;
7579         else
7580                 aencoder->encoder_id = -1;
7581
7582         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7583
7584         return res;
7585 }
7586
/*
 * Enable or disable the per-CRTC display interrupts (vblank, pageflip and,
 * when built in, the secure-display vline0 irq) for @acrtc.
 *
 * The disable path releases the irqs in the reverse order of the enable
 * path, with drm_crtc_vblank_on()/off() as the outermost pair.
 */
static void manage_dm_interrupts(struct amdgpu_device *adev,
                                 struct amdgpu_crtc *acrtc,
                                 bool enable)
{
        /*
         * We have no guarantee that the frontend index maps to the same
         * backend index - some even map to more than one.
         *
         * TODO: Use a different interrupt or check DC itself for the mapping.
         */
        int irq_type =
                amdgpu_display_crtc_idx_to_irq_type(
                        adev,
                        acrtc->crtc_id);

        if (enable) {
                drm_crtc_vblank_on(&acrtc->base);
                amdgpu_irq_get(
                        adev,
                        &adev->pageflip_irq,
                        irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
                amdgpu_irq_get(
                        adev,
                        &adev->vline0_irq,
                        irq_type);
#endif
        } else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
                amdgpu_irq_put(
                        adev,
                        &adev->vline0_irq,
                        irq_type);
#endif
                amdgpu_irq_put(
                        adev,
                        &adev->pageflip_irq,
                        irq_type);
                drm_crtc_vblank_off(&acrtc->base);
        }
}
7628
/* Re-apply the pageflip irq enable state for @acrtc to hardware. */
static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
                                      struct amdgpu_crtc *acrtc)
{
        int irq_type =
                amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

        /**
         * This reads the current state for the IRQ and force reapplies
         * the setting to hardware.
         */
        amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}
7641
7642 static bool
7643 is_scaling_state_different(const struct dm_connector_state *dm_state,
7644                            const struct dm_connector_state *old_dm_state)
7645 {
7646         if (dm_state->scaling != old_dm_state->scaling)
7647                 return true;
7648         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7649                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7650                         return true;
7651         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7652                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7653                         return true;
7654         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7655                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7656                 return true;
7657         return false;
7658 }
7659
7660 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * is_content_protection_different() - decide whether HDCP work is needed
 * for this connector's atomic state transition.
 *
 * Compares the old and new content-protection / content-type properties
 * and normalizes the new state in place (may rewrite
 * state->content_protection) so the rest of the driver sees a consistent
 * request.
 *
 * Return: true if HDCP (re)authentication must be kicked off for this
 * connector, false if the current HDCP state can be left alone.
 *
 * NOTE(review): hdcp_w is currently unused here — presumably kept so the
 * caller's signature stays stable; confirm before removing.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
                                            const struct drm_connector_state *old_state,
                                            const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

        /* Handle: Type0/1 change
         *
         * A content-type switch forces re-authentication unless protection
         * is entirely undesired; downgrade ENABLED to DESIRED so the HDCP
         * path restarts from a known state.
         */
        if (old_state->hdcp_content_type != state->hdcp_content_type &&
            state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
                state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
                return true;
        }

        /* CP is being re enabled, ignore this
         *
         * Handles:     ENABLED -> DESIRED
         */
        if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
            state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
                /* Keep reporting ENABLED; no re-auth needed. */
                state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
                return false;
        }

        /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
         *
         * Handles:     UNDESIRED -> ENABLED
         */
        if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
            state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
                state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

        /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
         * hot-plug, headless s3, dpms
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
        if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
            connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
                /* One-shot flag: consume it so we don't re-trigger HDCP. */
                dm_con_state->update_hdcp = false;
                return true;
        }

        /*
         * Handles:     UNDESIRED -> UNDESIRED
         *              DESIRED -> DESIRED
         *              ENABLED -> ENABLED
         */
        if (old_state->content_protection == state->content_protection)
                return false;

        /*
         * Handles:     UNDESIRED -> DESIRED
         *              DESIRED -> UNDESIRED
         *              ENABLED -> UNDESIRED
         */
        if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
                return true;

        /*
         * Handles:     DESIRED -> ENABLED
         *
         * ENABLED is set by the HDCP machinery itself, not by userspace, so
         * no new work is required here.
         */
        return false;
}
7725
7726 #endif
7727 static void remove_stream(struct amdgpu_device *adev,
7728                           struct amdgpu_crtc *acrtc,
7729                           struct dc_stream_state *stream)
7730 {
7731         /* this is the update mode case */
7732
7733         acrtc->otg_inst = -1;
7734         acrtc->enabled = false;
7735 }
7736
7737 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7738                                struct dc_cursor_position *position)
7739 {
7740         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7741         int x, y;
7742         int xorigin = 0, yorigin = 0;
7743
7744         if (!crtc || !plane->state->fb)
7745                 return 0;
7746
7747         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7748             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7749                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7750                           __func__,
7751                           plane->state->crtc_w,
7752                           plane->state->crtc_h);
7753                 return -EINVAL;
7754         }
7755
7756         x = plane->state->crtc_x;
7757         y = plane->state->crtc_y;
7758
7759         if (x <= -amdgpu_crtc->max_cursor_width ||
7760             y <= -amdgpu_crtc->max_cursor_height)
7761                 return 0;
7762
7763         if (x < 0) {
7764                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7765                 x = 0;
7766         }
7767         if (y < 0) {
7768                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7769                 y = 0;
7770         }
7771         position->enable = true;
7772         position->translate_by_source = true;
7773         position->x = x;
7774         position->y = y;
7775         position->x_hotspot = xorigin;
7776         position->y_hotspot = yorigin;
7777
7778         return 0;
7779 }
7780
/*
 * Program the DC cursor position and attributes for a cursor-plane
 * update (or hide the cursor when it has been disabled/moved off-screen).
 *
 * Takes dm.dc_lock around all dc_stream_set_cursor_* calls.
 */
static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state)
{
        struct amdgpu_device *adev = drm_to_adev(plane->dev);
        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
        /* Prefer the new CRTC; fall back to the old one when disabling. */
        struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
        struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        uint64_t address = afb ? afb->address : 0;
        /* Zero-initialized => position.enable is false until proven otherwise. */
        struct dc_cursor_position position = {0};
        struct dc_cursor_attributes attributes;
        int ret;

        /* Neither old nor new state has an fb: nothing to do. */
        if (!plane->state->fb && !old_plane_state->fb)
                return;

        /*
         * NOTE(review): the debug print below dereferences amdgpu_crtc,
         * which is derived from crtc without a NULL check — presumably
         * crtc is always non-NULL whenever an fb exists; confirm.
         */
        DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
                         __func__,
                         amdgpu_crtc->crtc_id,
                         plane->state->crtc_w,
                         plane->state->crtc_h);

        ret = get_cursor_position(plane, crtc, &position);
        if (ret)
                return;

        if (!position.enable) {
                /* turn off cursor */
                if (crtc_state && crtc_state->stream) {
                        mutex_lock(&adev->dm.dc_lock);
                        dc_stream_set_cursor_position(crtc_state->stream,
                                                      &position);
                        mutex_unlock(&adev->dm.dc_lock);
                }
                return;
        }

        /* Cache current cursor dimensions on the CRTC. */
        amdgpu_crtc->cursor_width = plane->state->crtc_w;
        amdgpu_crtc->cursor_height = plane->state->crtc_h;

        memset(&attributes, 0, sizeof(attributes));
        attributes.address.high_part = upper_32_bits(address);
        attributes.address.low_part  = lower_32_bits(address);
        attributes.width             = plane->state->crtc_w;
        attributes.height            = plane->state->crtc_h;
        attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
        attributes.rotation_angle    = 0;
        attributes.attribute_flags.value = 0;

        /* Pitch in pixels, derived from the fb's byte pitch. */
        attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

        if (crtc_state->stream) {
                mutex_lock(&adev->dm.dc_lock);
                if (!dc_stream_set_cursor_attributes(crtc_state->stream,
                                                         &attributes))
                        DRM_ERROR("DC failed to set cursor attributes\n");

                if (!dc_stream_set_cursor_position(crtc_state->stream,
                                                   &position))
                        DRM_ERROR("DC failed to set cursor position\n");
                mutex_unlock(&adev->dm.dc_lock);
        }
}
7844
7845 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7846 {
7847
7848         assert_spin_locked(&acrtc->base.dev->event_lock);
7849         WARN_ON(acrtc->event);
7850
7851         acrtc->event = acrtc->base.state->event;
7852
7853         /* Set the flip status */
7854         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7855
7856         /* Mark this event as consumed */
7857         acrtc->base.state->event = NULL;
7858
7859         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7860                                                  acrtc->crtc_id);
7861 }
7862
/*
 * Update VRR (FreeSync) state for a stream on flip: run the preflip
 * handling, rebuild the VRR infopacket, and record whether timing or
 * infopacket contents changed so the commit path can push updates.
 *
 * All VRR state shared with the IRQ handlers is read and written under
 * the DRM event_lock.
 */
static void update_freesync_state_on_stream(
        struct amdgpu_display_manager *dm,
        struct dm_crtc_state *new_crtc_state,
        struct dc_stream_state *new_stream,
        struct dc_plane_state *surface,
        u32 flip_timestamp_in_us)
{
        struct mod_vrr_params vrr_params;
        struct dc_info_packet vrr_infopacket = {0};
        struct amdgpu_device *adev = dm->adev;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
        unsigned long flags;
        /* SDP v1.3 packing not requested on this path. */
        bool pack_sdp_v1_3 = false;

        if (!new_stream)
                return;

        /*
         * TODO: Determine why min/max totals and vrefresh can be 0 here.
         * For now it's sufficient to just guard against these conditions.
         */

        if (!new_stream->timing.h_total || !new_stream->timing.v_total)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        /* Work on a local copy of the IRQ-shared VRR params. */
        vrr_params = acrtc->dm_irq_params.vrr_params;

        if (surface) {
                mod_freesync_handle_preflip(
                        dm->freesync_module,
                        surface,
                        new_stream,
                        flip_timestamp_in_us,
                        &vrr_params);

                /* Pre-AI (pre-Vega) ASICs handle v_update here in software. */
                if (adev->family < AMDGPU_FAMILY_AI &&
                    amdgpu_dm_vrr_active(new_crtc_state)) {
                        mod_freesync_handle_v_update(dm->freesync_module,
                                                     new_stream, &vrr_params);

                        /* Need to call this before the frame ends. */
                        dc_stream_adjust_vmin_vmax(dm->dc,
                                                   new_crtc_state->stream,
                                                   &vrr_params.adjust);
                }
        }

        mod_freesync_build_vrr_infopacket(
                dm->freesync_module,
                new_stream,
                &vrr_params,
                PACKET_TYPE_VRR,
                TRANSFER_FUNC_UNKNOWN,
                &vrr_infopacket,
                pack_sdp_v1_3);

        /* Compare against the previous IRQ-side state before overwriting. */
        new_crtc_state->freesync_timing_changed |=
                (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
                        &vrr_params.adjust,
                        sizeof(vrr_params.adjust)) != 0);

        new_crtc_state->freesync_vrr_info_changed |=
                (memcmp(&new_crtc_state->vrr_infopacket,
                        &vrr_infopacket,
                        sizeof(vrr_infopacket)) != 0);

        /* Publish the new state to the IRQ params, CRTC state and stream. */
        acrtc->dm_irq_params.vrr_params = vrr_params;
        new_crtc_state->vrr_infopacket = vrr_infopacket;

        new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
        new_stream->vrr_infopacket = vrr_infopacket;

        if (new_crtc_state->freesync_vrr_info_changed)
                DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
                              new_crtc_state->base.crtc->base.id,
                              (int)new_crtc_state->base.vrr_enabled,
                              (int)vrr_params.state);

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
7944
/*
 * Derive the VRR parameters for a stream from its FreeSync config and
 * copy the result into the CRTC's IRQ parameters so the interrupt
 * handlers see a consistent snapshot. Runs under the DRM event_lock.
 */
static void update_stream_irq_parameters(
        struct amdgpu_display_manager *dm,
        struct dm_crtc_state *new_crtc_state)
{
        struct dc_stream_state *new_stream = new_crtc_state->stream;
        struct mod_vrr_params vrr_params;
        struct mod_freesync_config config = new_crtc_state->freesync_config;
        struct amdgpu_device *adev = dm->adev;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
        unsigned long flags;

        if (!new_stream)
                return;

        /*
         * TODO: Determine why min/max totals and vrefresh can be 0 here.
         * For now it's sufficient to just guard against these conditions.
         */
        if (!new_stream->timing.h_total || !new_stream->timing.v_total)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        /* Work on a local copy of the IRQ-shared VRR params. */
        vrr_params = acrtc->dm_irq_params.vrr_params;

        if (new_crtc_state->vrr_supported &&
            config.min_refresh_in_uhz &&
            config.max_refresh_in_uhz) {
                /*
                 * if freesync compatible mode was set, config.state will be set
                 * in atomic check
                 */
                if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
                    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
                     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
                        /* Fixed-rate freesync-video mode: apply directly. */
                        vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
                        vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
                        vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
                        vrr_params.state = VRR_STATE_ACTIVE_FIXED;
                } else {
                        /* Otherwise the DRM vrr_enabled property decides. */
                        config.state = new_crtc_state->base.vrr_enabled ?
                                                     VRR_STATE_ACTIVE_VARIABLE :
                                                     VRR_STATE_INACTIVE;
                }
        } else {
                config.state = VRR_STATE_UNSUPPORTED;
        }

        mod_freesync_build_vrr_params(dm->freesync_module,
                                      new_stream,
                                      &config, &vrr_params);

        /* Flag a timing change if the adjust block differs from before. */
        new_crtc_state->freesync_timing_changed |=
                (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
                        &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

        new_crtc_state->freesync_config = config;
        /* Copy state for access from DM IRQ handler */
        acrtc->dm_irq_params.freesync_config = config;
        acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
        acrtc->dm_irq_params.vrr_params = vrr_params;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
8007
8008 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8009                                             struct dm_crtc_state *new_state)
8010 {
8011         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8012         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8013
8014         if (!old_vrr_active && new_vrr_active) {
8015                 /* Transition VRR inactive -> active:
8016                  * While VRR is active, we must not disable vblank irq, as a
8017                  * reenable after disable would compute bogus vblank/pflip
8018                  * timestamps if it likely happened inside display front-porch.
8019                  *
8020                  * We also need vupdate irq for the actual core vblank handling
8021                  * at end of vblank.
8022                  */
8023                 dm_set_vupdate_irq(new_state->base.crtc, true);
8024                 drm_crtc_vblank_get(new_state->base.crtc);
8025                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8026                                  __func__, new_state->base.crtc->base.id);
8027         } else if (old_vrr_active && !new_vrr_active) {
8028                 /* Transition VRR active -> inactive:
8029                  * Allow vblank irq disable again for fixed refresh rate.
8030                  */
8031                 dm_set_vupdate_irq(new_state->base.crtc, false);
8032                 drm_crtc_vblank_put(new_state->base.crtc);
8033                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8034                                  __func__, new_state->base.crtc->base.id);
8035         }
8036 }
8037
8038 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8039 {
8040         struct drm_plane *plane;
8041         struct drm_plane_state *old_plane_state, *new_plane_state;
8042         int i;
8043
8044         /*
8045          * TODO: Make this per-stream so we don't issue redundant updates for
8046          * commits with multiple streams.
8047          */
8048         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8049                                        new_plane_state, i)
8050                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8051                         handle_cursor_update(plane, old_plane_state);
8052 }
8053
8054 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8055                                     struct dc_state *dc_state,
8056                                     struct drm_device *dev,
8057                                     struct amdgpu_display_manager *dm,
8058                                     struct drm_crtc *pcrtc,
8059                                     bool wait_for_vblank)
8060 {
8061         uint32_t i;
8062         uint64_t timestamp_ns;
8063         struct drm_plane *plane;
8064         struct drm_plane_state *old_plane_state, *new_plane_state;
8065         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8066         struct drm_crtc_state *new_pcrtc_state =
8067                         drm_atomic_get_new_crtc_state(state, pcrtc);
8068         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8069         struct dm_crtc_state *dm_old_crtc_state =
8070                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8071         int planes_count = 0, vpos, hpos;
8072         long r;
8073         unsigned long flags;
8074         struct amdgpu_bo *abo;
8075         uint32_t target_vblank, last_flip_vblank;
8076         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8077         bool pflip_present = false;
8078         struct {
8079                 struct dc_surface_update surface_updates[MAX_SURFACES];
8080                 struct dc_plane_info plane_infos[MAX_SURFACES];
8081                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8082                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8083                 struct dc_stream_update stream_update;
8084         } *bundle;
8085
8086         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8087
8088         if (!bundle) {
8089                 dm_error("Failed to allocate update bundle\n");
8090                 goto cleanup;
8091         }
8092
8093         /*
8094          * Disable the cursor first if we're disabling all the planes.
8095          * It'll remain on the screen after the planes are re-enabled
8096          * if we don't.
8097          */
8098         if (acrtc_state->active_planes == 0)
8099                 amdgpu_dm_commit_cursors(state);
8100
8101         /* update planes when needed */
8102         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8103                 struct drm_crtc *crtc = new_plane_state->crtc;
8104                 struct drm_crtc_state *new_crtc_state;
8105                 struct drm_framebuffer *fb = new_plane_state->fb;
8106                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8107                 bool plane_needs_flip;
8108                 struct dc_plane_state *dc_plane;
8109                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8110
8111                 /* Cursor plane is handled after stream updates */
8112                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8113                         continue;
8114
8115                 if (!fb || !crtc || pcrtc != crtc)
8116                         continue;
8117
8118                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8119                 if (!new_crtc_state->active)
8120                         continue;
8121
8122                 dc_plane = dm_new_plane_state->dc_state;
8123
8124                 bundle->surface_updates[planes_count].surface = dc_plane;
8125                 if (new_pcrtc_state->color_mgmt_changed) {
8126                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8127                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8128                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8129                 }
8130
8131                 fill_dc_scaling_info(new_plane_state,
8132                                      &bundle->scaling_infos[planes_count]);
8133
8134                 bundle->surface_updates[planes_count].scaling_info =
8135                         &bundle->scaling_infos[planes_count];
8136
8137                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8138
8139                 pflip_present = pflip_present || plane_needs_flip;
8140
8141                 if (!plane_needs_flip) {
8142                         planes_count += 1;
8143                         continue;
8144                 }
8145
8146                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8147
8148                 /*
8149                  * Wait for all fences on this FB. Do limited wait to avoid
8150                  * deadlock during GPU reset when this fence will not signal
8151                  * but we hold reservation lock for the BO.
8152                  */
8153                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8154                                                         false,
8155                                                         msecs_to_jiffies(5000));
8156                 if (unlikely(r <= 0))
8157                         DRM_ERROR("Waiting for fences timed out!");
8158
8159                 fill_dc_plane_info_and_addr(
8160                         dm->adev, new_plane_state,
8161                         afb->tiling_flags,
8162                         &bundle->plane_infos[planes_count],
8163                         &bundle->flip_addrs[planes_count].address,
8164                         afb->tmz_surface, false);
8165
8166                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
8167                                  new_plane_state->plane->index,
8168                                  bundle->plane_infos[planes_count].dcc.enable);
8169
8170                 bundle->surface_updates[planes_count].plane_info =
8171                         &bundle->plane_infos[planes_count];
8172
8173                 /*
8174                  * Only allow immediate flips for fast updates that don't
8175                  * change FB pitch, DCC state, rotation or mirroing.
8176                  */
8177                 bundle->flip_addrs[planes_count].flip_immediate =
8178                         crtc->state->async_flip &&
8179                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8180
8181                 timestamp_ns = ktime_get_ns();
8182                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8183                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8184                 bundle->surface_updates[planes_count].surface = dc_plane;
8185
8186                 if (!bundle->surface_updates[planes_count].surface) {
8187                         DRM_ERROR("No surface for CRTC: id=%d\n",
8188                                         acrtc_attach->crtc_id);
8189                         continue;
8190                 }
8191
8192                 if (plane == pcrtc->primary)
8193                         update_freesync_state_on_stream(
8194                                 dm,
8195                                 acrtc_state,
8196                                 acrtc_state->stream,
8197                                 dc_plane,
8198                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8199
8200                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
8201                                  __func__,
8202                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8203                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8204
8205                 planes_count += 1;
8206
8207         }
8208
8209         if (pflip_present) {
8210                 if (!vrr_active) {
8211                         /* Use old throttling in non-vrr fixed refresh rate mode
8212                          * to keep flip scheduling based on target vblank counts
8213                          * working in a backwards compatible way, e.g., for
8214                          * clients using the GLX_OML_sync_control extension or
8215                          * DRI3/Present extension with defined target_msc.
8216                          */
8217                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8218                 }
8219                 else {
8220                         /* For variable refresh rate mode only:
8221                          * Get vblank of last completed flip to avoid > 1 vrr
8222                          * flips per video frame by use of throttling, but allow
8223                          * flip programming anywhere in the possibly large
8224                          * variable vrr vblank interval for fine-grained flip
8225                          * timing control and more opportunity to avoid stutter
8226                          * on late submission of flips.
8227                          */
8228                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8229                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8230                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8231                 }
8232
8233                 target_vblank = last_flip_vblank + wait_for_vblank;
8234
8235                 /*
8236                  * Wait until we're out of the vertical blank period before the one
8237                  * targeted by the flip
8238                  */
8239                 while ((acrtc_attach->enabled &&
8240                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8241                                                             0, &vpos, &hpos, NULL,
8242                                                             NULL, &pcrtc->hwmode)
8243                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8244                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8245                         (int)(target_vblank -
8246                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8247                         usleep_range(1000, 1100);
8248                 }
8249
8250                 /**
8251                  * Prepare the flip event for the pageflip interrupt to handle.
8252                  *
8253                  * This only works in the case where we've already turned on the
8254                  * appropriate hardware blocks (eg. HUBP) so in the transition case
8255                  * from 0 -> n planes we have to skip a hardware generated event
8256                  * and rely on sending it from software.
8257                  */
8258                 if (acrtc_attach->base.state->event &&
8259                     acrtc_state->active_planes > 0) {
8260                         drm_crtc_vblank_get(pcrtc);
8261
8262                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8263
8264                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8265                         prepare_flip_isr(acrtc_attach);
8266
8267                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8268                 }
8269
8270                 if (acrtc_state->stream) {
8271                         if (acrtc_state->freesync_vrr_info_changed)
8272                                 bundle->stream_update.vrr_infopacket =
8273                                         &acrtc_state->stream->vrr_infopacket;
8274                 }
8275         }
8276
8277         /* Update the planes if changed or disable if we don't have any. */
8278         if ((planes_count || acrtc_state->active_planes == 0) &&
8279                 acrtc_state->stream) {
8280                 bundle->stream_update.stream = acrtc_state->stream;
8281                 if (new_pcrtc_state->mode_changed) {
8282                         bundle->stream_update.src = acrtc_state->stream->src;
8283                         bundle->stream_update.dst = acrtc_state->stream->dst;
8284                 }
8285
8286                 if (new_pcrtc_state->color_mgmt_changed) {
8287                         /*
8288                          * TODO: This isn't fully correct since we've actually
8289                          * already modified the stream in place.
8290                          */
8291                         bundle->stream_update.gamut_remap =
8292                                 &acrtc_state->stream->gamut_remap_matrix;
8293                         bundle->stream_update.output_csc_transform =
8294                                 &acrtc_state->stream->csc_color_matrix;
8295                         bundle->stream_update.out_transfer_func =
8296                                 acrtc_state->stream->out_transfer_func;
8297                 }
8298
8299                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8300                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8301                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8302
8303                 /*
8304                  * If FreeSync state on the stream has changed then we need to
8305                  * re-adjust the min/max bounds now that DC doesn't handle this
8306                  * as part of commit.
8307                  */
8308                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8309                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8310                         dc_stream_adjust_vmin_vmax(
8311                                 dm->dc, acrtc_state->stream,
8312                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8313                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8314                 }
8315                 mutex_lock(&dm->dc_lock);
8316                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8317                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8318                         amdgpu_dm_psr_disable(acrtc_state->stream);
8319
8320                 dc_commit_updates_for_stream(dm->dc,
8321                                                      bundle->surface_updates,
8322                                                      planes_count,
8323                                                      acrtc_state->stream,
8324                                                      &bundle->stream_update,
8325                                                      dc_state);
8326
8327                 /**
8328                  * Enable or disable the interrupts on the backend.
8329                  *
8330                  * Most pipes are put into power gating when unused.
8331                  *
8332                  * When power gating is enabled on a pipe we lose the
8333                  * interrupt enablement state when power gating is disabled.
8334                  *
8335                  * So we need to update the IRQ control state in hardware
8336                  * whenever the pipe turns on (since it could be previously
8337                  * power gated) or off (since some pipes can't be power gated
8338                  * on some ASICs).
8339                  */
8340                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8341                         dm_update_pflip_irq_state(drm_to_adev(dev),
8342                                                   acrtc_attach);
8343
8344                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8345                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8346                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8347                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8348                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8349                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8350                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8351                         amdgpu_dm_psr_enable(acrtc_state->stream);
8352                 }
8353
8354                 mutex_unlock(&dm->dc_lock);
8355         }
8356
8357         /*
8358          * Update cursor state *after* programming all the planes.
8359          * This avoids redundant programming in the case where we're going
8360          * to be disabling a single plane - those pipes are being disabled.
8361          */
8362         if (acrtc_state->active_planes)
8363                 amdgpu_dm_commit_cursors(state);
8364
8365 cleanup:
8366         kfree(bundle);
8367 }
8368
/*
 * amdgpu_dm_commit_audio - update audio endpoint state for an atomic commit.
 * @dev: DRM device
 * @state: the atomic state being committed
 *
 * Walks the connectors touched by @state and notifies the audio side (via
 * amdgpu_dm_audio_eld_notify()) in two passes: first every connector whose
 * CRTC binding changed or that went through a modeset has its audio instance
 * torn down (removal), then every modeset-affected connector that still
 * drives a stream gets the audio instance reported by DC (addition).
 * aconnector->audio_inst is always read/written under dm.audio_lock.
 */
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		/* Connectors not going through a modeset keep their audio. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		/* Invalidate the cached instance, then tell the audio driver. */
		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		/* No DC status means the stream is not live yet; skip. */
		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		/* Cache the instance DC assigned, then notify the audio driver. */
		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
8443
8444 /*
8445  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8446  * @crtc_state: the DRM CRTC state
8447  * @stream_state: the DC stream state.
8448  *
8449  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8450  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8451  */
8452 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8453                                                 struct dc_stream_state *stream_state)
8454 {
8455         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8456 }
8457
8458 /**
8459  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8460  * @state: The atomic state to commit
8461  *
8462  * This will tell DC to commit the constructed DC state from atomic_check,
8463  * programming the hardware. Any failures here implies a hardware failure, since
8464  * atomic check should have filtered anything non-kosher.
8465  */
8466 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8467 {
8468         struct drm_device *dev = state->dev;
8469         struct amdgpu_device *adev = drm_to_adev(dev);
8470         struct amdgpu_display_manager *dm = &adev->dm;
8471         struct dm_atomic_state *dm_state;
8472         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8473         uint32_t i, j;
8474         struct drm_crtc *crtc;
8475         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8476         unsigned long flags;
8477         bool wait_for_vblank = true;
8478         struct drm_connector *connector;
8479         struct drm_connector_state *old_con_state, *new_con_state;
8480         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8481         int crtc_disable_count = 0;
8482         bool mode_set_reset_required = false;
8483
8484         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8485
8486         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8487
8488         dm_state = dm_atomic_get_new_state(state);
8489         if (dm_state && dm_state->context) {
8490                 dc_state = dm_state->context;
8491         } else {
8492                 /* No state changes, retain current state. */
8493                 dc_state_temp = dc_create_state(dm->dc);
8494                 ASSERT(dc_state_temp);
8495                 dc_state = dc_state_temp;
8496                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8497         }
8498
8499         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8500                                        new_crtc_state, i) {
8501                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8502
8503                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8504
8505                 if (old_crtc_state->active &&
8506                     (!new_crtc_state->active ||
8507                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8508                         manage_dm_interrupts(adev, acrtc, false);
8509                         dc_stream_release(dm_old_crtc_state->stream);
8510                 }
8511         }
8512
8513         drm_atomic_helper_calc_timestamping_constants(state);
8514
8515         /* update changed items */
8516         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8517                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8518
8519                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8520                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8521
8522                 DRM_DEBUG_DRIVER(
8523                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8524                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8525                         "connectors_changed:%d\n",
8526                         acrtc->crtc_id,
8527                         new_crtc_state->enable,
8528                         new_crtc_state->active,
8529                         new_crtc_state->planes_changed,
8530                         new_crtc_state->mode_changed,
8531                         new_crtc_state->active_changed,
8532                         new_crtc_state->connectors_changed);
8533
8534                 /* Disable cursor if disabling crtc */
8535                 if (old_crtc_state->active && !new_crtc_state->active) {
8536                         struct dc_cursor_position position;
8537
8538                         memset(&position, 0, sizeof(position));
8539                         mutex_lock(&dm->dc_lock);
8540                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8541                         mutex_unlock(&dm->dc_lock);
8542                 }
8543
8544                 /* Copy all transient state flags into dc state */
8545                 if (dm_new_crtc_state->stream) {
8546                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8547                                                             dm_new_crtc_state->stream);
8548                 }
8549
8550                 /* handles headless hotplug case, updating new_state and
8551                  * aconnector as needed
8552                  */
8553
8554                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8555
8556                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8557
8558                         if (!dm_new_crtc_state->stream) {
8559                                 /*
8560                                  * this could happen because of issues with
8561                                  * userspace notifications delivery.
8562                                  * In this case userspace tries to set mode on
8563                                  * display which is disconnected in fact.
8564                                  * dc_sink is NULL in this case on aconnector.
8565                                  * We expect reset mode will come soon.
8566                                  *
8567                                  * This can also happen when unplug is done
8568                                  * during resume sequence ended
8569                                  *
8570                                  * In this case, we want to pretend we still
8571                                  * have a sink to keep the pipe running so that
8572                                  * hw state is consistent with the sw state
8573                                  */
8574                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8575                                                 __func__, acrtc->base.base.id);
8576                                 continue;
8577                         }
8578
8579                         if (dm_old_crtc_state->stream)
8580                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8581
8582                         pm_runtime_get_noresume(dev->dev);
8583
8584                         acrtc->enabled = true;
8585                         acrtc->hw_mode = new_crtc_state->mode;
8586                         crtc->hwmode = new_crtc_state->mode;
8587                         mode_set_reset_required = true;
8588                 } else if (modereset_required(new_crtc_state)) {
8589                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8590                         /* i.e. reset mode */
8591                         if (dm_old_crtc_state->stream)
8592                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8593
8594                         mode_set_reset_required = true;
8595                 }
8596         } /* for_each_crtc_in_state() */
8597
8598         if (dc_state) {
8599                 /* if there mode set or reset, disable eDP PSR */
8600                 if (mode_set_reset_required)
8601                         amdgpu_dm_psr_disable_all(dm);
8602
8603                 dm_enable_per_frame_crtc_master_sync(dc_state);
8604                 mutex_lock(&dm->dc_lock);
8605                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8606                 mutex_unlock(&dm->dc_lock);
8607         }
8608
8609         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8610                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8611
8612                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8613
8614                 if (dm_new_crtc_state->stream != NULL) {
8615                         const struct dc_stream_status *status =
8616                                         dc_stream_get_status(dm_new_crtc_state->stream);
8617
8618                         if (!status)
8619                                 status = dc_stream_get_status_from_state(dc_state,
8620                                                                          dm_new_crtc_state->stream);
8621                         if (!status)
8622                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8623                         else
8624                                 acrtc->otg_inst = status->primary_otg_inst;
8625                 }
8626         }
8627 #ifdef CONFIG_DRM_AMD_DC_HDCP
8628         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8629                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8630                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8631                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8632
8633                 new_crtc_state = NULL;
8634
8635                 if (acrtc)
8636                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8637
8638                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8639
8640                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8641                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8642                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8643                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8644                         dm_new_con_state->update_hdcp = true;
8645                         continue;
8646                 }
8647
8648                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8649                         hdcp_update_display(
8650                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8651                                 new_con_state->hdcp_content_type,
8652                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8653         }
8654 #endif
8655
8656         /* Handle connector state changes */
8657         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8658                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8659                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8660                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8661                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8662                 struct dc_stream_update stream_update;
8663                 struct dc_info_packet hdr_packet;
8664                 struct dc_stream_status *status = NULL;
8665                 bool abm_changed, hdr_changed, scaling_changed;
8666
8667                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8668                 memset(&stream_update, 0, sizeof(stream_update));
8669
8670                 if (acrtc) {
8671                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8672                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8673                 }
8674
8675                 /* Skip any modesets/resets */
8676                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8677                         continue;
8678
8679                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8680                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8681
8682                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8683                                                              dm_old_con_state);
8684
8685                 abm_changed = dm_new_crtc_state->abm_level !=
8686                               dm_old_crtc_state->abm_level;
8687
8688                 hdr_changed =
8689                         is_hdr_metadata_different(old_con_state, new_con_state);
8690
8691                 if (!scaling_changed && !abm_changed && !hdr_changed)
8692                         continue;
8693
8694                 stream_update.stream = dm_new_crtc_state->stream;
8695                 if (scaling_changed) {
8696                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8697                                         dm_new_con_state, dm_new_crtc_state->stream);
8698
8699                         stream_update.src = dm_new_crtc_state->stream->src;
8700                         stream_update.dst = dm_new_crtc_state->stream->dst;
8701                 }
8702
8703                 if (abm_changed) {
8704                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8705
8706                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8707                 }
8708
8709                 if (hdr_changed) {
8710                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8711                         stream_update.hdr_static_metadata = &hdr_packet;
8712                 }
8713
8714                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8715                 WARN_ON(!status);
8716                 WARN_ON(!status->plane_count);
8717
8718                 /*
8719                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8720                  * Here we create an empty update on each plane.
8721                  * To fix this, DC should permit updating only stream properties.
8722                  */
8723                 for (j = 0; j < status->plane_count; j++)
8724                         dummy_updates[j].surface = status->plane_states[0];
8725
8726
8727                 mutex_lock(&dm->dc_lock);
8728                 dc_commit_updates_for_stream(dm->dc,
8729                                                      dummy_updates,
8730                                                      status->plane_count,
8731                                                      dm_new_crtc_state->stream,
8732                                                      &stream_update,
8733                                                      dc_state);
8734                 mutex_unlock(&dm->dc_lock);
8735         }
8736
8737         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8738         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8739                                       new_crtc_state, i) {
8740                 if (old_crtc_state->active && !new_crtc_state->active)
8741                         crtc_disable_count++;
8742
8743                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8744                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8745
8746                 /* For freesync config update on crtc state and params for irq */
8747                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8748
8749                 /* Handle vrr on->off / off->on transitions */
8750                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8751                                                 dm_new_crtc_state);
8752         }
8753
8754         /**
8755          * Enable interrupts for CRTCs that are newly enabled or went through
8756          * a modeset. It was intentionally deferred until after the front end
8757          * state was modified to wait until the OTG was on and so the IRQ
8758          * handlers didn't access stale or invalid state.
8759          */
8760         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8761                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8762 #ifdef CONFIG_DEBUG_FS
8763                 bool configure_crc = false;
8764                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8765 #endif
8766                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8767
8768                 if (new_crtc_state->active &&
8769                     (!old_crtc_state->active ||
8770                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8771                         dc_stream_retain(dm_new_crtc_state->stream);
8772                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8773                         manage_dm_interrupts(adev, acrtc, true);
8774
8775 #ifdef CONFIG_DEBUG_FS
8776                         /**
8777                          * Frontend may have changed so reapply the CRC capture
8778                          * settings for the stream.
8779                          */
8780                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8781                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8782                         cur_crc_src = acrtc->dm_irq_params.crc_src;
8783                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8784
8785                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8786                                 configure_crc = true;
8787 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8788                                 if (amdgpu_dm_crc_window_is_activated(crtc))
8789                                         configure_crc = false;
8790 #endif
8791                         }
8792
8793                         if (configure_crc)
8794                                 amdgpu_dm_crtc_configure_crc_source(
8795                                         crtc, dm_new_crtc_state, cur_crc_src);
8796 #endif
8797                 }
8798         }
8799
8800         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8801                 if (new_crtc_state->async_flip)
8802                         wait_for_vblank = false;
8803
8804         /* update planes when needed per crtc*/
8805         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8806                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8807
8808                 if (dm_new_crtc_state->stream)
8809                         amdgpu_dm_commit_planes(state, dc_state, dev,
8810                                                 dm, crtc, wait_for_vblank);
8811         }
8812
8813         /* Update audio instances for each connector. */
8814         amdgpu_dm_commit_audio(dev, state);
8815
8816         /*
8817          * send vblank event on all events not handled in flip and
8818          * mark consumed event for drm_atomic_helper_commit_hw_done
8819          */
8820         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8821         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8822
8823                 if (new_crtc_state->event)
8824                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8825
8826                 new_crtc_state->event = NULL;
8827         }
8828         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8829
8830         /* Signal HW programming completion */
8831         drm_atomic_helper_commit_hw_done(state);
8832
8833         if (wait_for_vblank)
8834                 drm_atomic_helper_wait_for_flip_done(dev, state);
8835
8836         drm_atomic_helper_cleanup_planes(dev, state);
8837
8838         /* return the stolen vga memory back to VRAM */
8839         if (!adev->mman.keep_stolen_vga_memory)
8840                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8841         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8842
8843         /*
8844          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8845          * so we can put the GPU into runtime suspend if we're not driving any
8846          * displays anymore
8847          */
8848         for (i = 0; i < crtc_disable_count; i++)
8849                 pm_runtime_put_autosuspend(dev->dev);
8850         pm_runtime_mark_last_busy(dev->dev);
8851
8852         if (dc_state_temp)
8853                 dc_release_state(dc_state_temp);
8854 }
8855
8856
8857 static int dm_force_atomic_commit(struct drm_connector *connector)
8858 {
8859         int ret = 0;
8860         struct drm_device *ddev = connector->dev;
8861         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8862         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8863         struct drm_plane *plane = disconnected_acrtc->base.primary;
8864         struct drm_connector_state *conn_state;
8865         struct drm_crtc_state *crtc_state;
8866         struct drm_plane_state *plane_state;
8867
8868         if (!state)
8869                 return -ENOMEM;
8870
8871         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8872
8873         /* Construct an atomic state to restore previous display setting */
8874
8875         /*
8876          * Attach connectors to drm_atomic_state
8877          */
8878         conn_state = drm_atomic_get_connector_state(state, connector);
8879
8880         ret = PTR_ERR_OR_ZERO(conn_state);
8881         if (ret)
8882                 goto out;
8883
8884         /* Attach crtc to drm_atomic_state*/
8885         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8886
8887         ret = PTR_ERR_OR_ZERO(crtc_state);
8888         if (ret)
8889                 goto out;
8890
8891         /* force a restore */
8892         crtc_state->mode_changed = true;
8893
8894         /* Attach plane to drm_atomic_state */
8895         plane_state = drm_atomic_get_plane_state(state, plane);
8896
8897         ret = PTR_ERR_OR_ZERO(plane_state);
8898         if (ret)
8899                 goto out;
8900
8901         /* Call commit internally with the state we just constructed */
8902         ret = drm_atomic_commit(state);
8903
8904 out:
8905         drm_atomic_state_put(state);
8906         if (ret)
8907                 DRM_ERROR("Restoring old state failed with %i\n", ret);
8908
8909         return ret;
8910 }
8911
8912 /*
8913  * This function handles all cases when set mode does not come upon hotplug.
8914  * This includes when a display is unplugged then plugged back into the
8915  * same port and when running without usermode desktop manager supprot
8916  */
8917 void dm_restore_drm_connector_state(struct drm_device *dev,
8918                                     struct drm_connector *connector)
8919 {
8920         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8921         struct amdgpu_crtc *disconnected_acrtc;
8922         struct dm_crtc_state *acrtc_state;
8923
8924         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8925                 return;
8926
8927         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8928         if (!disconnected_acrtc)
8929                 return;
8930
8931         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8932         if (!acrtc_state->stream)
8933                 return;
8934
8935         /*
8936          * If the previous sink is not released and different from the current,
8937          * we deduce we are in a state where we can not rely on usermode call
8938          * to turn on the display, so we do it here
8939          */
8940         if (acrtc_state->stream->sink != aconnector->dc_sink)
8941                 dm_force_atomic_commit(&aconnector->base);
8942 }
8943
8944 /*
8945  * Grabs all modesetting locks to serialize against any blocking commits,
8946  * Waits for completion of all non blocking commits.
8947  */
8948 static int do_aquire_global_lock(struct drm_device *dev,
8949                                  struct drm_atomic_state *state)
8950 {
8951         struct drm_crtc *crtc;
8952         struct drm_crtc_commit *commit;
8953         long ret;
8954
8955         /*
8956          * Adding all modeset locks to aquire_ctx will
8957          * ensure that when the framework release it the
8958          * extra locks we are locking here will get released to
8959          */
8960         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8961         if (ret)
8962                 return ret;
8963
8964         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8965                 spin_lock(&crtc->commit_lock);
8966                 commit = list_first_entry_or_null(&crtc->commit_list,
8967                                 struct drm_crtc_commit, commit_entry);
8968                 if (commit)
8969                         drm_crtc_commit_get(commit);
8970                 spin_unlock(&crtc->commit_lock);
8971
8972                 if (!commit)
8973                         continue;
8974
8975                 /*
8976                  * Make sure all pending HW programming completed and
8977                  * page flips done
8978                  */
8979                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8980
8981                 if (ret > 0)
8982                         ret = wait_for_completion_interruptible_timeout(
8983                                         &commit->flip_done, 10*HZ);
8984
8985                 if (ret == 0)
8986                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8987                                   "timed out\n", crtc->base.id, crtc->name);
8988
8989                 drm_crtc_commit_put(commit);
8990         }
8991
8992         return ret < 0 ? ret : 0;
8993 }
8994
8995 static void get_freesync_config_for_crtc(
8996         struct dm_crtc_state *new_crtc_state,
8997         struct dm_connector_state *new_con_state)
8998 {
8999         struct mod_freesync_config config = {0};
9000         struct amdgpu_dm_connector *aconnector =
9001                         to_amdgpu_dm_connector(new_con_state->base.connector);
9002         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9003         int vrefresh = drm_mode_vrefresh(mode);
9004         bool fs_vid_mode = false;
9005
9006         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9007                                         vrefresh >= aconnector->min_vfreq &&
9008                                         vrefresh <= aconnector->max_vfreq;
9009
9010         if (new_crtc_state->vrr_supported) {
9011                 new_crtc_state->stream->ignore_msa_timing_param = true;
9012                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9013
9014                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9015                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9016                 config.vsif_supported = true;
9017                 config.btr = true;
9018
9019                 if (fs_vid_mode) {
9020                         config.state = VRR_STATE_ACTIVE_FIXED;
9021                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9022                         goto out;
9023                 } else if (new_crtc_state->base.vrr_enabled) {
9024                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9025                 } else {
9026                         config.state = VRR_STATE_INACTIVE;
9027                 }
9028         }
9029 out:
9030         new_crtc_state->freesync_config = config;
9031 }
9032
9033 static void reset_freesync_config_for_crtc(
9034         struct dm_crtc_state *new_crtc_state)
9035 {
9036         new_crtc_state->vrr_supported = false;
9037
9038         memset(&new_crtc_state->vrr_infopacket, 0,
9039                sizeof(new_crtc_state->vrr_infopacket));
9040 }
9041
/*
 * Check whether a mode change between two CRTC states differs ONLY in the
 * vertical front porch.  Freesync video switches refresh rate by stretching
 * the vertical front porch while keeping everything else identical, so such
 * a change does not require a full modeset.
 *
 * Returns true only when the horizontal timing, clock, hskew and vscan are
 * identical, the vertical totals DO differ, and the vsync pulse width
 * (vsync_end - vsync_start) is preserved.
 */
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state)
{
        struct drm_display_mode old_mode, new_mode;

        if (!old_crtc_state || !new_crtc_state)
                return false;

        old_mode = old_crtc_state->mode;
        new_mode = new_crtc_state->mode;

        /*
         * NOTE: vtotal, vsync_start and vsync_end are deliberately compared
         * with '!=' — a front-porch-only change moves all three together.
         * Identical modes therefore return false here, which is fine for
         * the callers since no timing adjustment is needed in that case.
         */
        if (old_mode.clock       == new_mode.clock &&
            old_mode.hdisplay    == new_mode.hdisplay &&
            old_mode.vdisplay    == new_mode.vdisplay &&
            old_mode.htotal      == new_mode.htotal &&
            old_mode.vtotal      != new_mode.vtotal &&
            old_mode.hsync_start == new_mode.hsync_start &&
            old_mode.vsync_start != new_mode.vsync_start &&
            old_mode.hsync_end   == new_mode.hsync_end &&
            old_mode.vsync_end   != new_mode.vsync_end &&
            old_mode.hskew       == new_mode.hskew &&
            old_mode.vscan       == new_mode.vscan &&
            (old_mode.vsync_end - old_mode.vsync_start) ==
            (new_mode.vsync_end - new_mode.vsync_start))
                return true;

        return false;
}
9071
9072 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9073         uint64_t num, den, res;
9074         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9075
9076         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9077
9078         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9079         den = (unsigned long long)new_crtc_state->mode.htotal *
9080               (unsigned long long)new_crtc_state->mode.vtotal;
9081
9082         res = div_u64(num, den);
9083         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9084 }
9085
/*
 * Validate a CRTC's transition for an atomic commit and update the DC
 * context to match: remove the backing dc_stream for disabled/changed
 * CRTCs (!enable pass) or create/validate and add a new stream for
 * enabled CRTCs (enable pass).  Also refreshes non-modeset stream
 * properties (scaling, ABM, color management, freesync).
 *
 * @dm:      display manager owning the dc context
 * @state:   the in-progress atomic state
 * @crtc:    CRTC being checked
 * @old_crtc_state/@new_crtc_state: the CRTC's old/new DRM states
 * @enable:  false = removal pass, true = addition pass
 * @lock_and_validation_needed: set to true when the dc context changed
 *     and full dc validation/global locking is required later
 *
 * Returns 0 on success or a negative error code.  On failure the extra
 * reference on any created stream is dropped at the 'fail' label.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                                struct drm_atomic_state *state,
                                struct drm_crtc *crtc,
                                struct drm_crtc_state *old_crtc_state,
                                struct drm_crtc_state *new_crtc_state,
                                bool enable,
                                bool *lock_and_validation_needed)
{
        struct dm_atomic_state *dm_state = NULL;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        struct dc_stream_state *new_stream;
        int ret = 0;

        /*
         * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
         * update changed items
         */
        struct amdgpu_crtc *acrtc = NULL;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
        struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

        new_stream = NULL;

        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
        acrtc = to_amdgpu_crtc(crtc);
        aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

        /* TODO This hack should go away */
        if (aconnector && enable) {
                /* Make sure fake sink is created in plug-in scenario */
                drm_new_conn_state = drm_atomic_get_new_connector_state(state,
                                                            &aconnector->base);
                drm_old_conn_state = drm_atomic_get_old_connector_state(state,
                                                            &aconnector->base);

                if (IS_ERR(drm_new_conn_state)) {
                        ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
                        goto fail;
                }

                dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
                dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        goto skip_modeset;

                /*
                 * Returns a new reference; released at skip_modeset/fail,
                 * or ownership transferred to dm_new_crtc_state->stream
                 * via the dc_stream_retain() below.
                 */
                new_stream = create_validate_stream_for_sink(aconnector,
                                                             &new_crtc_state->mode,
                                                             dm_new_conn_state,
                                                             dm_old_crtc_state->stream);

                /*
                 * we can have no stream on ACTION_SET if a display
                 * was disconnected during S3, in this case it is not an
                 * error, the OS will be updated after detection, and
                 * will do the right thing on next atomic commit
                 */

                if (!new_stream) {
                        DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                        __func__, acrtc->base.base.id);
                        ret = -ENOMEM;
                        goto fail;
                }

                /*
                 * TODO: Check VSDB bits to decide whether this should
                 * be enabled or not.
                 */
                new_stream->triggered_crtc_reset.enabled =
                        dm->force_timing_sync;

                dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

                ret = fill_hdr_info_packet(drm_new_conn_state,
                                           &new_stream->hdr_static_metadata);
                if (ret)
                        goto fail;

                /*
                 * If we already removed the old stream from the context
                 * (and set the new stream to NULL) then we can't reuse
                 * the old stream even if the stream and scaling are unchanged.
                 * We'll hit the BUG_ON and black screen.
                 *
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
                if (amdgpu_freesync_vid_mode &&
                    dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
                        goto skip_modeset;

                if (dm_new_crtc_state->stream &&
                    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
                                         new_crtc_state->mode_changed);
                }
        }

        /* mode_changed flag may get updated above, need to check again */
        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                goto skip_modeset;

        DRM_DEBUG_DRIVER(
                "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                "connectors_changed:%d\n",
                acrtc->crtc_id,
                new_crtc_state->enable,
                new_crtc_state->active,
                new_crtc_state->planes_changed,
                new_crtc_state->mode_changed,
                new_crtc_state->active_changed,
                new_crtc_state->connectors_changed);

        /* Remove stream for any changed/disabled CRTC */
        if (!enable) {

                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;

                /*
                 * Freesync video: a front-porch-only change can be done
                 * without a full modeset by switching to a fixed refresh
                 * rate instead.
                 */
                if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER(
                                "Mode change not required for front porch change, "
                                "setting mode_changed to %d",
                                new_crtc_state->mode_changed);

                        set_freesync_fixed_config(dm_new_crtc_state);

                        goto skip_modeset;
                } else if (amdgpu_freesync_vid_mode && aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
                        set_freesync_fixed_config(dm_new_crtc_state);
                }

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        goto fail;

                DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
                                crtc->base.id);

                /* i.e. reset mode */
                if (dc_remove_stream_from_ctx(
                                dm->dc,
                                dm_state->context,
                                dm_old_crtc_state->stream) != DC_OK) {
                        ret = -EINVAL;
                        goto fail;
                }

                /* Drop the state's reference on the now-removed stream. */
                dc_stream_release(dm_old_crtc_state->stream);
                dm_new_crtc_state->stream = NULL;

                reset_freesync_config_for_crtc(dm_new_crtc_state);

                *lock_and_validation_needed = true;

        } else {/* Add stream for any updated/enabled CRTC */
                /*
                 * Quick fix to prevent NULL pointer on new_stream when
                 * added MST connectors not found in existing crtc_state in the chained mode
                 * TODO: need to dig out the root cause of that
                 */
                if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
                        goto skip_modeset;

                if (modereset_required(new_crtc_state))
                        goto skip_modeset;

                if (modeset_required(new_crtc_state, new_stream,
                                     dm_old_crtc_state->stream)) {

                        WARN_ON(dm_new_crtc_state->stream);

                        ret = dm_atomic_get_state(state, &dm_state);
                        if (ret)
                                goto fail;

                        dm_new_crtc_state->stream = new_stream;

                        /*
                         * Extra reference for the CRTC state; the local
                         * reference is dropped at skip_modeset.
                         */
                        dc_stream_retain(new_stream);

                        DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
                                                crtc->base.id);

                        if (dc_add_stream_to_ctx(
                                        dm->dc,
                                        dm_state->context,
                                        dm_new_crtc_state->stream) != DC_OK) {
                                ret = -EINVAL;
                                goto fail;
                        }

                        *lock_and_validation_needed = true;
                }
        }

skip_modeset:
        /* Release extra reference */
        if (new_stream)
                 dc_stream_release(new_stream);

        /*
         * We want to do dc stream updates that do not require a
         * full modeset below.
         */
        if (!(enable && aconnector && new_crtc_state->active))
                return 0;
        /*
         * Given above conditions, the dc state cannot be NULL because:
         * 1. We're in the process of enabling CRTCs (just been added
         *    to the dc context, or already is on the context)
         * 2. Has a valid connector attached, and
         * 3. Is currently active and enabled.
         * => The dc stream state currently exists.
         */
        BUG_ON(dm_new_crtc_state->stream == NULL);

        /* Scaling or underscan settings */
        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
                update_stream_scaling_settings(
                        &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

        /* ABM settings */
        dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

        /*
         * Color management settings. We also update color properties
         * when a modeset is needed, to ensure it gets reprogrammed.
         */
        if (dm_new_crtc_state->base.color_mgmt_changed ||
            drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
                if (ret)
                        goto fail;
        }

        /* Update Freesync settings. */
        get_freesync_config_for_crtc(dm_new_crtc_state,
                                     dm_new_conn_state);

        return ret;

fail:
        if (new_stream)
                dc_stream_release(new_stream);
        return ret;
}
9344
9345 static bool should_reset_plane(struct drm_atomic_state *state,
9346                                struct drm_plane *plane,
9347                                struct drm_plane_state *old_plane_state,
9348                                struct drm_plane_state *new_plane_state)
9349 {
9350         struct drm_plane *other;
9351         struct drm_plane_state *old_other_state, *new_other_state;
9352         struct drm_crtc_state *new_crtc_state;
9353         int i;
9354
9355         /*
9356          * TODO: Remove this hack once the checks below are sufficient
9357          * enough to determine when we need to reset all the planes on
9358          * the stream.
9359          */
9360         if (state->allow_modeset)
9361                 return true;
9362
9363         /* Exit early if we know that we're adding or removing the plane. */
9364         if (old_plane_state->crtc != new_plane_state->crtc)
9365                 return true;
9366
9367         /* old crtc == new_crtc == NULL, plane not in context. */
9368         if (!new_plane_state->crtc)
9369                 return false;
9370
9371         new_crtc_state =
9372                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9373
9374         if (!new_crtc_state)
9375                 return true;
9376
9377         /* CRTC Degamma changes currently require us to recreate planes. */
9378         if (new_crtc_state->color_mgmt_changed)
9379                 return true;
9380
9381         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9382                 return true;
9383
9384         /*
9385          * If there are any new primary or overlay planes being added or
9386          * removed then the z-order can potentially change. To ensure
9387          * correct z-order and pipe acquisition the current DC architecture
9388          * requires us to remove and recreate all existing planes.
9389          *
9390          * TODO: Come up with a more elegant solution for this.
9391          */
9392         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9393                 struct amdgpu_framebuffer *old_afb, *new_afb;
9394                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9395                         continue;
9396
9397                 if (old_other_state->crtc != new_plane_state->crtc &&
9398                     new_other_state->crtc != new_plane_state->crtc)
9399                         continue;
9400
9401                 if (old_other_state->crtc != new_other_state->crtc)
9402                         return true;
9403
9404                 /* Src/dst size and scaling updates. */
9405                 if (old_other_state->src_w != new_other_state->src_w ||
9406                     old_other_state->src_h != new_other_state->src_h ||
9407                     old_other_state->crtc_w != new_other_state->crtc_w ||
9408                     old_other_state->crtc_h != new_other_state->crtc_h)
9409                         return true;
9410
9411                 /* Rotation / mirroring updates. */
9412                 if (old_other_state->rotation != new_other_state->rotation)
9413                         return true;
9414
9415                 /* Blending updates. */
9416                 if (old_other_state->pixel_blend_mode !=
9417                     new_other_state->pixel_blend_mode)
9418                         return true;
9419
9420                 /* Alpha updates. */
9421                 if (old_other_state->alpha != new_other_state->alpha)
9422                         return true;
9423
9424                 /* Colorspace changes. */
9425                 if (old_other_state->color_range != new_other_state->color_range ||
9426                     old_other_state->color_encoding != new_other_state->color_encoding)
9427                         return true;
9428
9429                 /* Framebuffer checks fall at the end. */
9430                 if (!old_other_state->fb || !new_other_state->fb)
9431                         continue;
9432
9433                 /* Pixel format changes can require bandwidth updates. */
9434                 if (old_other_state->fb->format != new_other_state->fb->format)
9435                         return true;
9436
9437                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9438                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9439
9440                 /* Tiling and DCC changes also require bandwidth updates. */
9441                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9442                     old_afb->base.modifier != new_afb->base.modifier)
9443                         return true;
9444         }
9445
9446         return false;
9447 }
9448
9449 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9450                               struct drm_plane_state *new_plane_state,
9451                               struct drm_framebuffer *fb)
9452 {
9453         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9454         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9455         unsigned int pitch;
9456         bool linear;
9457
9458         if (fb->width > new_acrtc->max_cursor_width ||
9459             fb->height > new_acrtc->max_cursor_height) {
9460                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9461                                  new_plane_state->fb->width,
9462                                  new_plane_state->fb->height);
9463                 return -EINVAL;
9464         }
9465         if (new_plane_state->src_w != fb->width << 16 ||
9466             new_plane_state->src_h != fb->height << 16) {
9467                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9468                 return -EINVAL;
9469         }
9470
9471         /* Pitch in pixels */
9472         pitch = fb->pitches[0] / fb->format->cpp[0];
9473
9474         if (fb->width != pitch) {
9475                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9476                                  fb->width, pitch);
9477                 return -EINVAL;
9478         }
9479
9480         switch (pitch) {
9481         case 64:
9482         case 128:
9483         case 256:
9484                 /* FB pitch is supported by cursor plane */
9485                 break;
9486         default:
9487                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9488                 return -EINVAL;
9489         }
9490
9491         /* Core DRM takes care of checking FB modifiers, so we only need to
9492          * check tiling flags when the FB doesn't have a modifier. */
9493         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9494                 if (adev->family < AMDGPU_FAMILY_AI) {
9495                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9496                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9497                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9498                 } else {
9499                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9500                 }
9501                 if (!linear) {
9502                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
9503                         return -EINVAL;
9504                 }
9505         }
9506
9507         return 0;
9508 }
9509
/*
 * Validate a plane's transition for an atomic commit and update the DC
 * context to match: remove the backing dc_plane_state for disabled/changed
 * planes (!enable pass) or create, fill and add a new dc_plane_state for
 * enabled planes (enable pass).  Cursor planes are only sanity-checked —
 * they have no dedicated DC plane and piggy-back on the pipe's cursor.
 *
 * @dc:     DC instance owning the context
 * @state:  the in-progress atomic state
 * @plane:  plane being checked
 * @old_plane_state/@new_plane_state: the plane's old/new DRM states
 * @enable: false = removal pass, true = addition pass
 * @lock_and_validation_needed: set to true when the dc context changed
 *     and full dc validation/global locking is required later
 *
 * Returns 0 on success or a negative error code.
 */
static int dm_update_plane_state(struct dc *dc,
                                 struct drm_atomic_state *state,
                                 struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state,
                                 struct drm_plane_state *new_plane_state,
                                 bool enable,
                                 bool *lock_and_validation_needed)
{

        struct dm_atomic_state *dm_state = NULL;
        struct drm_crtc *new_plane_crtc, *old_plane_crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
        struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
        struct amdgpu_crtc *new_acrtc;
        bool needs_reset;
        int ret = 0;


        new_plane_crtc = new_plane_state->crtc;
        old_plane_crtc = old_plane_state->crtc;
        dm_new_plane_state = to_dm_plane_state(new_plane_state);
        dm_old_plane_state = to_dm_plane_state(old_plane_state);

        /* Cursor planes: validate position/FB only, no DC plane to manage. */
        if (plane->type == DRM_PLANE_TYPE_CURSOR) {
                if (!enable || !new_plane_crtc ||
                        drm_atomic_plane_disabling(plane->state, new_plane_state))
                        return 0;

                new_acrtc = to_amdgpu_crtc(new_plane_crtc);

                if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
                        DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
                        return -EINVAL;
                }

                if (new_plane_state->fb) {
                        ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
                                                 new_plane_state->fb);
                        if (ret)
                                return ret;
                }

                return 0;
        }

        needs_reset = should_reset_plane(state, plane, old_plane_state,
                                         new_plane_state);

        /* Remove any changed/removed planes */
        if (!enable) {
                if (!needs_reset)
                        return 0;

                if (!old_plane_crtc)
                        return 0;

                old_crtc_state = drm_atomic_get_old_crtc_state(
                                state, old_plane_crtc);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                if (!dm_old_crtc_state->stream)
                        return 0;

                DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
                                plane->base.id, old_plane_crtc->base.id);

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        return ret;

                if (!dc_remove_plane_from_context(
                                dc,
                                dm_old_crtc_state->stream,
                                dm_old_plane_state->dc_state,
                                dm_state->context)) {

                        return -EINVAL;
                }


                /* Drop the plane state's reference on the removed dc plane. */
                dc_plane_state_release(dm_old_plane_state->dc_state);
                dm_new_plane_state->dc_state = NULL;

                *lock_and_validation_needed = true;

        } else { /* Add new planes */
                struct dc_plane_state *dc_new_plane_state;

                if (drm_atomic_plane_disabling(plane->state, new_plane_state))
                        return 0;

                if (!new_plane_crtc)
                        return 0;

                new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (!dm_new_crtc_state->stream)
                        return 0;

                if (!needs_reset)
                        return 0;

                ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
                if (ret)
                        return ret;

                WARN_ON(dm_new_plane_state->dc_state);

                dc_new_plane_state = dc_create_plane_state(dc);
                if (!dc_new_plane_state)
                        return -ENOMEM;

                DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
                                plane->base.id, new_plane_crtc->base.id);

                ret = fill_dc_plane_attributes(
                        drm_to_adev(new_plane_crtc->dev),
                        dc_new_plane_state,
                        new_plane_state,
                        new_crtc_state);
                if (ret) {
                        dc_plane_state_release(dc_new_plane_state);
                        return ret;
                }

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret) {
                        dc_plane_state_release(dc_new_plane_state);
                        return ret;
                }

                /*
                 * Any atomic check errors that occur after this will
                 * not need a release. The plane state will be attached
                 * to the stream, and therefore part of the atomic
                 * state. It'll be released when the atomic state is
                 * cleaned.
                 */
                if (!dc_add_plane_to_context(
                                dc,
                                dm_new_crtc_state->stream,
                                dc_new_plane_state,
                                dm_state->context)) {

                        dc_plane_state_release(dc_new_plane_state);
                        return -EINVAL;
                }

                dm_new_plane_state->dc_state = dc_new_plane_state;

                /* Tell DC to do a full surface update every time there
                 * is a plane change. Inefficient, but works for now.
                 */
                dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

                *lock_and_validation_needed = true;
        }


        return ret;
}
9673
9674 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9675                                 struct drm_crtc *crtc,
9676                                 struct drm_crtc_state *new_crtc_state)
9677 {
9678         struct drm_plane_state *new_cursor_state, *new_primary_state;
9679         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9680
9681         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9682          * cursor per pipe but it's going to inherit the scaling and
9683          * positioning from the underlying pipe. Check the cursor plane's
9684          * blending properties match the primary plane's. */
9685
9686         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9687         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9688         if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9689                 return 0;
9690         }
9691
9692         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9693                          (new_cursor_state->src_w >> 16);
9694         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9695                          (new_cursor_state->src_h >> 16);
9696
9697         primary_scale_w = new_primary_state->crtc_w * 1000 /
9698                          (new_primary_state->src_w >> 16);
9699         primary_scale_h = new_primary_state->crtc_h * 1000 /
9700                          (new_primary_state->src_h >> 16);
9701
9702         if (cursor_scale_w != primary_scale_w ||
9703             cursor_scale_h != primary_scale_h) {
9704                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9705                 return -EINVAL;
9706         }
9707
9708         return 0;
9709 }
9710
9711 #if defined(CONFIG_DRM_AMD_DC_DCN)
9712 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9713 {
9714         struct drm_connector *connector;
9715         struct drm_connector_state *conn_state;
9716         struct amdgpu_dm_connector *aconnector = NULL;
9717         int i;
9718         for_each_new_connector_in_state(state, connector, conn_state, i) {
9719                 if (conn_state->crtc != crtc)
9720                         continue;
9721
9722                 aconnector = to_amdgpu_dm_connector(connector);
9723                 if (!aconnector->port || !aconnector->mst_port)
9724                         aconnector = NULL;
9725                 else
9726                         break;
9727         }
9728
9729         if (!aconnector)
9730                 return 0;
9731
9732         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9733 }
9734 #endif
9735
9736 /**
9737  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9738  * @dev: The DRM device
9739  * @state: The atomic state to commit
9740  *
9741  * Validate that the given atomic state is programmable by DC into hardware.
9742  * This involves constructing a &struct dc_state reflecting the new hardware
9743  * state we wish to commit, then querying DC to see if it is programmable. It's
9744  * important not to modify the existing DC state. Otherwise, atomic_check
9745  * may unexpectedly commit hardware changes.
9746  *
9747  * When validating the DC state, it's important that the right locks are
9748  * acquired. For full updates case which removes/adds/updates streams on one
9749  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9750  * that any such full update commit will wait for completion of any outstanding
9751  * flip using DRMs synchronization events.
9752  *
9753  * Note that DM adds the affected connectors for all CRTCs in state, when that
9754  * might not seem necessary. This is because DC stream creation requires the
9755  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9756  * be possible but non-trivial - a possible TODO item.
9757  *
9758  * Return: -Error code if validation failed.
9759  */
9760 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9761                                   struct drm_atomic_state *state)
9762 {
9763         struct amdgpu_device *adev = drm_to_adev(dev);
9764         struct dm_atomic_state *dm_state = NULL;
9765         struct dc *dc = adev->dm.dc;
9766         struct drm_connector *connector;
9767         struct drm_connector_state *old_con_state, *new_con_state;
9768         struct drm_crtc *crtc;
9769         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9770         struct drm_plane *plane;
9771         struct drm_plane_state *old_plane_state, *new_plane_state;
9772         enum dc_status status;
9773         int ret, i;
9774         bool lock_and_validation_needed = false;
9775         struct dm_crtc_state *dm_old_crtc_state;
9776
9777         trace_amdgpu_dm_atomic_check_begin(state);
9778
9779         ret = drm_atomic_helper_check_modeset(dev, state);
9780         if (ret)
9781                 goto fail;
9782
9783         /* Check connector changes */
9784         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9785                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9786                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9787
9788                 /* Skip connectors that are disabled or part of modeset already. */
9789                 if (!old_con_state->crtc && !new_con_state->crtc)
9790                         continue;
9791
9792                 if (!new_con_state->crtc)
9793                         continue;
9794
9795                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9796                 if (IS_ERR(new_crtc_state)) {
9797                         ret = PTR_ERR(new_crtc_state);
9798                         goto fail;
9799                 }
9800
9801                 if (dm_old_con_state->abm_level !=
9802                     dm_new_con_state->abm_level)
9803                         new_crtc_state->connectors_changed = true;
9804         }
9805
9806 #if defined(CONFIG_DRM_AMD_DC_DCN)
9807         if (dc_resource_is_dsc_encoding_supported(dc)) {
9808                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9809                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9810                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9811                                 if (ret)
9812                                         goto fail;
9813                         }
9814                 }
9815         }
9816 #endif
9817         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9818                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9819
9820                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9821                     !new_crtc_state->color_mgmt_changed &&
9822                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9823                         dm_old_crtc_state->dsc_force_changed == false)
9824                         continue;
9825
9826                 if (!new_crtc_state->enable)
9827                         continue;
9828
9829                 ret = drm_atomic_add_affected_connectors(state, crtc);
9830                 if (ret)
9831                         return ret;
9832
9833                 ret = drm_atomic_add_affected_planes(state, crtc);
9834                 if (ret)
9835                         goto fail;
9836
9837                 if (dm_old_crtc_state->dsc_force_changed)
9838                         new_crtc_state->mode_changed = true;
9839         }
9840
9841         /*
9842          * Add all primary and overlay planes on the CRTC to the state
9843          * whenever a plane is enabled to maintain correct z-ordering
9844          * and to enable fast surface updates.
9845          */
9846         drm_for_each_crtc(crtc, dev) {
9847                 bool modified = false;
9848
9849                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9850                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9851                                 continue;
9852
9853                         if (new_plane_state->crtc == crtc ||
9854                             old_plane_state->crtc == crtc) {
9855                                 modified = true;
9856                                 break;
9857                         }
9858                 }
9859
9860                 if (!modified)
9861                         continue;
9862
9863                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9864                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9865                                 continue;
9866
9867                         new_plane_state =
9868                                 drm_atomic_get_plane_state(state, plane);
9869
9870                         if (IS_ERR(new_plane_state)) {
9871                                 ret = PTR_ERR(new_plane_state);
9872                                 goto fail;
9873                         }
9874                 }
9875         }
9876
9877         /* Remove exiting planes if they are modified */
9878         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9879                 ret = dm_update_plane_state(dc, state, plane,
9880                                             old_plane_state,
9881                                             new_plane_state,
9882                                             false,
9883                                             &lock_and_validation_needed);
9884                 if (ret)
9885                         goto fail;
9886         }
9887
9888         /* Disable all crtcs which require disable */
9889         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9890                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9891                                            old_crtc_state,
9892                                            new_crtc_state,
9893                                            false,
9894                                            &lock_and_validation_needed);
9895                 if (ret)
9896                         goto fail;
9897         }
9898
9899         /* Enable all crtcs which require enable */
9900         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9901                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9902                                            old_crtc_state,
9903                                            new_crtc_state,
9904                                            true,
9905                                            &lock_and_validation_needed);
9906                 if (ret)
9907                         goto fail;
9908         }
9909
9910         /* Add new/modified planes */
9911         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9912                 ret = dm_update_plane_state(dc, state, plane,
9913                                             old_plane_state,
9914                                             new_plane_state,
9915                                             true,
9916                                             &lock_and_validation_needed);
9917                 if (ret)
9918                         goto fail;
9919         }
9920
9921         /* Run this here since we want to validate the streams we created */
9922         ret = drm_atomic_helper_check_planes(dev, state);
9923         if (ret)
9924                 goto fail;
9925
9926         /* Check cursor planes scaling */
9927         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9928                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9929                 if (ret)
9930                         goto fail;
9931         }
9932
9933         if (state->legacy_cursor_update) {
9934                 /*
9935                  * This is a fast cursor update coming from the plane update
9936                  * helper, check if it can be done asynchronously for better
9937                  * performance.
9938                  */
9939                 state->async_update =
9940                         !drm_atomic_helper_async_check(dev, state);
9941
9942                 /*
9943                  * Skip the remaining global validation if this is an async
9944                  * update. Cursor updates can be done without affecting
9945                  * state or bandwidth calcs and this avoids the performance
9946                  * penalty of locking the private state object and
9947                  * allocating a new dc_state.
9948                  */
9949                 if (state->async_update)
9950                         return 0;
9951         }
9952
9953         /* Check scaling and underscan changes*/
9954         /* TODO Removed scaling changes validation due to inability to commit
9955          * new stream into context w\o causing full reset. Need to
9956          * decide how to handle.
9957          */
9958         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9959                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9960                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9961                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9962
9963                 /* Skip any modesets/resets */
9964                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9965                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9966                         continue;
9967
9968                 /* Skip any thing not scale or underscan changes */
9969                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9970                         continue;
9971
9972                 lock_and_validation_needed = true;
9973         }
9974
9975         /**
9976          * Streams and planes are reset when there are changes that affect
9977          * bandwidth. Anything that affects bandwidth needs to go through
9978          * DC global validation to ensure that the configuration can be applied
9979          * to hardware.
9980          *
9981          * We have to currently stall out here in atomic_check for outstanding
9982          * commits to finish in this case because our IRQ handlers reference
9983          * DRM state directly - we can end up disabling interrupts too early
9984          * if we don't.
9985          *
9986          * TODO: Remove this stall and drop DM state private objects.
9987          */
9988         if (lock_and_validation_needed) {
9989                 ret = dm_atomic_get_state(state, &dm_state);
9990                 if (ret)
9991                         goto fail;
9992
9993                 ret = do_aquire_global_lock(dev, state);
9994                 if (ret)
9995                         goto fail;
9996
9997 #if defined(CONFIG_DRM_AMD_DC_DCN)
9998                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9999                         goto fail;
10000
10001                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10002                 if (ret)
10003                         goto fail;
10004 #endif
10005
10006                 /*
10007                  * Perform validation of MST topology in the state:
10008                  * We need to perform MST atomic check before calling
10009                  * dc_validate_global_state(), or there is a chance
10010                  * to get stuck in an infinite loop and hang eventually.
10011                  */
10012                 ret = drm_dp_mst_atomic_check(state);
10013                 if (ret)
10014                         goto fail;
10015                 status = dc_validate_global_state(dc, dm_state->context, false);
10016                 if (status != DC_OK) {
10017                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10018                                        dc_status_to_str(status), status);
10019                         ret = -EINVAL;
10020                         goto fail;
10021                 }
10022         } else {
10023                 /*
10024                  * The commit is a fast update. Fast updates shouldn't change
10025                  * the DC context, affect global validation, and can have their
10026                  * commit work done in parallel with other commits not touching
10027                  * the same resource. If we have a new DC context as part of
10028                  * the DM atomic state from validation we need to free it and
10029                  * retain the existing one instead.
10030                  *
10031                  * Furthermore, since the DM atomic state only contains the DC
10032                  * context and can safely be annulled, we can free the state
10033                  * and clear the associated private object now to free
10034                  * some memory and avoid a possible use-after-free later.
10035                  */
10036
10037                 for (i = 0; i < state->num_private_objs; i++) {
10038                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10039
10040                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10041                                 int j = state->num_private_objs-1;
10042
10043                                 dm_atomic_destroy_state(obj,
10044                                                 state->private_objs[i].state);
10045
10046                                 /* If i is not at the end of the array then the
10047                                  * last element needs to be moved to where i was
10048                                  * before the array can safely be truncated.
10049                                  */
10050                                 if (i != j)
10051                                         state->private_objs[i] =
10052                                                 state->private_objs[j];
10053
10054                                 state->private_objs[j].ptr = NULL;
10055                                 state->private_objs[j].state = NULL;
10056                                 state->private_objs[j].old_state = NULL;
10057                                 state->private_objs[j].new_state = NULL;
10058
10059                                 state->num_private_objs = j;
10060                                 break;
10061                         }
10062                 }
10063         }
10064
10065         /* Store the overall update type for use later in atomic check. */
10066         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10067                 struct dm_crtc_state *dm_new_crtc_state =
10068                         to_dm_crtc_state(new_crtc_state);
10069
10070                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10071                                                          UPDATE_TYPE_FULL :
10072                                                          UPDATE_TYPE_FAST;
10073         }
10074
10075         /* Must be success */
10076         WARN_ON(ret);
10077
10078         trace_amdgpu_dm_atomic_check_finish(state, ret);
10079
10080         return ret;
10081
10082 fail:
10083         if (ret == -EDEADLK)
10084                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10085         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10086                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10087         else
10088                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10089
10090         trace_amdgpu_dm_atomic_check_finish(state, ret);
10091
10092         return ret;
10093 }
10094
10095 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10096                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10097 {
10098         uint8_t dpcd_data;
10099         bool capable = false;
10100
10101         if (amdgpu_dm_connector->dc_link &&
10102                 dm_helpers_dp_read_dpcd(
10103                                 NULL,
10104                                 amdgpu_dm_connector->dc_link,
10105                                 DP_DOWN_STREAM_PORT_COUNT,
10106                                 &dpcd_data,
10107                                 sizeof(dpcd_data))) {
10108                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10109         }
10110
10111         return capable;
10112 }
10113
10114 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10115                 uint8_t *edid_ext, int len,
10116                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10117 {
10118         int i;
10119         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10120         struct dc *dc = adev->dm.dc;
10121
10122         /* send extension block to DMCU for parsing */
10123         for (i = 0; i < len; i += 8) {
10124                 bool res;
10125                 int offset;
10126
10127                 /* send 8 bytes a time */
10128                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10129                         return false;
10130
10131                 if (i+8 == len) {
10132                         /* EDID block sent completed, expect result */
10133                         int version, min_rate, max_rate;
10134
10135                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10136                         if (res) {
10137                                 /* amd vsdb found */
10138                                 vsdb_info->freesync_supported = 1;
10139                                 vsdb_info->amd_vsdb_version = version;
10140                                 vsdb_info->min_refresh_rate_hz = min_rate;
10141                                 vsdb_info->max_refresh_rate_hz = max_rate;
10142                                 return true;
10143                         }
10144                         /* not amd vsdb */
10145                         return false;
10146                 }
10147
10148                 /* check for ack*/
10149                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10150                 if (!res)
10151                         return false;
10152         }
10153
10154         return false;
10155 }
10156
10157 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10158                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10159 {
10160         uint8_t *edid_ext = NULL;
10161         int i;
10162         bool valid_vsdb_found = false;
10163
10164         /*----- drm_find_cea_extension() -----*/
10165         /* No EDID or EDID extensions */
10166         if (edid == NULL || edid->extensions == 0)
10167                 return -ENODEV;
10168
10169         /* Find CEA extension */
10170         for (i = 0; i < edid->extensions; i++) {
10171                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10172                 if (edid_ext[0] == CEA_EXT)
10173                         break;
10174         }
10175
10176         if (i == edid->extensions)
10177                 return -ENODEV;
10178
10179         /*----- cea_db_offsets() -----*/
10180         if (edid_ext[0] != CEA_EXT)
10181                 return -ENODEV;
10182
10183         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10184
10185         return valid_vsdb_found ? i : -ENODEV;
10186 }
10187
/**
 * amdgpu_dm_update_freesync_caps - Re-evaluate FreeSync/VRR support for a connector
 * @connector: DRM connector whose capabilities should be refreshed
 * @edid: newly read EDID for the connector, or NULL when the display is gone
 *
 * For DP/eDP sinks, scans the EDID detailed timing descriptors for a monitor
 * range descriptor (but only when the sink reports it can operate without MSA
 * timing parameters). For HDMI sinks, asks the DMCU firmware to parse the CEA
 * extension block for an AMD vendor-specific data block. The resulting
 * min/max refresh range is cached on the amdgpu connector, and the outcome is
 * stored in the DM connector state and mirrored to the DRM "vrr_capable"
 * property.
 */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	/* Default to "not capable" until a valid refresh range is found. */
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		/* Display removed: clear the cached refresh range. */
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;


	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		/* Only trust the EDID range descriptor when the sink can keep
		 * timing without MSA parameters (edid is non-NULL here).
		 */
		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		/* Monitor range descriptors require EDID 1.2 or newer. */
		if (edid_check_required == true && (edid->version > 1 ||
		   (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {

				timing	= &edid->detailed_timings[i];
				data	= &timing->data.other_data;
				range	= &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			/* A usable VRR window must span more than 10 Hz. */
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {

				freesync_capable = true;
			}
		}
	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		/* NOTE(review): parse_hdmi_amd_vsdb() returns the CEA
		 * extension block index, yet it is used below to index
		 * detailed_timings[] (and timing/data are then unused) —
		 * looks like leftover code; confirm the intended indexing.
		 */
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
10301
10302 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10303 {
10304         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10305
10306         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10307                 return;
10308         if (link->type == dc_connection_none)
10309                 return;
10310         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10311                                         dpcd_data, sizeof(dpcd_data))) {
10312                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10313
10314                 if (dpcd_data[0] == 0) {
10315                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10316                         link->psr_settings.psr_feature_enabled = false;
10317                 } else {
10318                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10319                         link->psr_settings.psr_feature_enabled = true;
10320                 }
10321
10322                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10323         }
10324 }
10325
10326 /*
10327  * amdgpu_dm_link_setup_psr() - configure psr link
10328  * @stream: stream state
10329  *
10330  * Return: true if success
10331  */
10332 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10333 {
10334         struct dc_link *link = NULL;
10335         struct psr_config psr_config = {0};
10336         struct psr_context psr_context = {0};
10337         bool ret = false;
10338
10339         if (stream == NULL)
10340                 return false;
10341
10342         link = stream->link;
10343
10344         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10345
10346         if (psr_config.psr_version > 0) {
10347                 psr_config.psr_exit_link_training_required = 0x1;
10348                 psr_config.psr_frame_capture_indication_req = 0;
10349                 psr_config.psr_rfb_setup_time = 0x37;
10350                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10351                 psr_config.allow_smu_optimizations = 0x0;
10352
10353                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10354
10355         }
10356         DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
10357
10358         return ret;
10359 }
10360
10361 /*
10362  * amdgpu_dm_psr_enable() - enable psr f/w
10363  * @stream: stream state
10364  *
10365  * Return: true if success
10366  */
10367 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10368 {
10369         struct dc_link *link = stream->link;
10370         unsigned int vsync_rate_hz = 0;
10371         struct dc_static_screen_params params = {0};
10372         /* Calculate number of static frames before generating interrupt to
10373          * enter PSR.
10374          */
10375         // Init fail safe of 2 frames static
10376         unsigned int num_frames_static = 2;
10377
10378         DRM_DEBUG_DRIVER("Enabling psr...\n");
10379
10380         vsync_rate_hz = div64_u64(div64_u64((
10381                         stream->timing.pix_clk_100hz * 100),
10382                         stream->timing.v_total),
10383                         stream->timing.h_total);
10384
10385         /* Round up
10386          * Calculate number of frames such that at least 30 ms of time has
10387          * passed.
10388          */
10389         if (vsync_rate_hz != 0) {
10390                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10391                 num_frames_static = (30000 / frame_time_microsec) + 1;
10392         }
10393
10394         params.triggers.cursor_update = true;
10395         params.triggers.overlay_update = true;
10396         params.triggers.surface_update = true;
10397         params.num_frames = num_frames_static;
10398
10399         dc_stream_set_static_screen_params(link->ctx->dc,
10400                                            &stream, 1,
10401                                            &params);
10402
10403         return dc_link_set_psr_allow_active(link, true, false, false);
10404 }
10405
10406 /*
10407  * amdgpu_dm_psr_disable() - disable psr f/w
10408  * @stream:  stream state
10409  *
10410  * Return: true if success
10411  */
10412 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10413 {
10414
10415         DRM_DEBUG_DRIVER("Disabling psr...\n");
10416
10417         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10418 }
10419
10420 /*
10421  * amdgpu_dm_psr_disable() - disable psr f/w
10422  * if psr is enabled on any stream
10423  *
10424  * Return: true if success
10425  */
10426 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10427 {
10428         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10429         return dc_set_psr_allow_active(dm->dc, false);
10430 }
10431
10432 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10433 {
10434         struct amdgpu_device *adev = drm_to_adev(dev);
10435         struct dc *dc = adev->dm.dc;
10436         int i;
10437
10438         mutex_lock(&adev->dm.dc_lock);
10439         if (dc->current_state) {
10440                 for (i = 0; i < dc->current_state->stream_count; ++i)
10441                         dc->current_state->streams[i]
10442                                 ->triggered_crtc_reset.enabled =
10443                                 adev->dm.force_timing_sync;
10444
10445                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10446                 dc_trigger_sync(dc, dc->current_state);
10447         }
10448         mutex_unlock(&adev->dm.dc_lock);
10449 }
10450
10451 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10452                        uint32_t value, const char *func_name)
10453 {
10454 #ifdef DM_CHECK_ADDR_0
10455         if (address == 0) {
10456                 DC_ERR("invalid register write. address = 0");
10457                 return;
10458         }
10459 #endif
10460         cgs_write_register(ctx->cgs_device, address, value);
10461         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10462 }
10463
10464 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10465                           const char *func_name)
10466 {
10467         uint32_t value;
10468 #ifdef DM_CHECK_ADDR_0
10469         if (address == 0) {
10470                 DC_ERR("invalid register read; address = 0\n");
10471                 return 0;
10472         }
10473 #endif
10474
10475         if (ctx->dmub_srv &&
10476             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10477             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10478                 ASSERT(false);
10479                 return 0;
10480         }
10481
10482         value = cgs_read_register(ctx->cgs_device, address);
10483
10484         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10485
10486         return value;
10487 }
This page took 0.687272 seconds and 4 git commands to generate.