/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the current vblank counter, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

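		/*
		 * Pack back into the legacy register layout: vertical position
		 * in the low 16 bits, horizontal in the high 16; vblank start
		 * low, vblank end high.
		 */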
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

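/*
 * IRQ-context variant of amdgpu_dm_vrr_active(): it reads the freesync state
 * cached in the interrupt parameters rather than the DRM CRTC state, so it is
 * safe to call from the interrupt handlers below.
 */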
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

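	/*
	 * The pageflip IRQ sources are laid out consecutively per OTG, so
	 * subtracting the first pflip source yields the OTG instance.
	 */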
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * only once scanout is past the front-porch. This will also
		 * deliver page-flip completion events that have been queued
		 * to us if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification into DM, where it can be read by the
 * AUX-command-issuing thread, and signals the event to wake that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index from
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	/* Valid link indices are 0 .. link_count - 1, so reject == as well. */
	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is an existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining DMUB notifications and
 * trace-buffer entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

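		/* Drain queued notifications until DMUB reports none pending. */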
		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type]) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

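	/*
	 * Independently drain the DMUB outbox0 trace buffer, bounded by
	 * DMUB_TRACE_MAX_READ entries per interrupt.
	 */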
	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;
	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

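	/* Reserve enough for the largest listed mode at 4 bytes per pixel. */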
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD: idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

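	/*
	 * The instruction/constant region of the image is wrapped by a PSP
	 * header and footer (PSP_HEADER_BYTES/PSP_FOOTER_BYTES), which the
	 * copies below skip; the BSS/data section follows that region.
	 */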
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
#endif
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

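	/*
	 * Note the register granularities mirrored by the shifts below: the
	 * system aperture is programmed in 256KB units (>> 18), the AGP
	 * window in 16MB units (>> 24), and the GART page table in 4KB
	 * pages (>> 12).
	 */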
	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

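	/*
	 * Idle (MALL) optimizations are only allowed while no CRTC has
	 * vblank interrupts enabled, so key them off the active count.
	 */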
	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

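	/*
	 * One single-threaded workqueue per link, so offloaded HPD RX work
	 * for a given link is serialized.
	 */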
1348         for (i = 0; i < max_caps; i++) {
1349                 hpd_rx_offload_wq[i].wq =
1350                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1351
1352                 if (hpd_rx_offload_wq[i].wq == NULL) {
1353                         DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq\n");
1354                         goto out_err;
1355                 }
1356
1357                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1358         }
1359
1360         return hpd_rx_offload_wq;

     out_err:
             /* Unwind: destroy any workqueues created before the failure. */
             while (--i >= 0)
                     destroy_workqueue(hpd_rx_offload_wq[i].wq);
             kfree(hpd_rx_offload_wq);
             return NULL;
1361 }
1362
1363 struct amdgpu_stutter_quirk {
1364         u16 chip_vendor;
1365         u16 chip_device;
1366         u16 subsys_vendor;
1367         u16 subsys_device;
1368         u8 revision;
1369 };
1370
1371 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1372         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1373         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1374         { 0, 0, 0, 0, 0 },
1375 };
1376
1377 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1378 {
1379         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1380
1381         while (p && p->chip_device != 0) {
1382                 if (pdev->vendor == p->chip_vendor &&
1383                     pdev->device == p->chip_device &&
1384                     pdev->subsystem_vendor == p->subsys_vendor &&
1385                     pdev->subsystem_device == p->subsys_device &&
1386                     pdev->revision == p->revision) {
1387                         return true;
1388                 }
1389                 ++p;
1390         }
1391         return false;
1392 }
1393
1394 static int amdgpu_dm_init(struct amdgpu_device *adev)
1395 {
1396         struct dc_init_data init_data;
1397 #ifdef CONFIG_DRM_AMD_DC_HDCP
1398         struct dc_callback_init init_params;
1399 #endif
1400         int r;
1401
1402         adev->dm.ddev = adev_to_drm(adev);
1403         adev->dm.adev = adev;
1404
1405         /* Zero all the fields */
1406         memset(&init_data, 0, sizeof(init_data));
1407 #ifdef CONFIG_DRM_AMD_DC_HDCP
1408         memset(&init_params, 0, sizeof(init_params));
1409 #endif
1410
1411         mutex_init(&adev->dm.dc_lock);
1412         mutex_init(&adev->dm.audio_lock);
1413 #if defined(CONFIG_DRM_AMD_DC_DCN)
1414         spin_lock_init(&adev->dm.vblank_lock);
1415 #endif
1416
1417         if (amdgpu_dm_irq_init(adev)) {
1418                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1419                 goto error;
1420         }
1421
1422         init_data.asic_id.chip_family = adev->family;
1423
1424         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1425         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1426         init_data.asic_id.chip_id = adev->pdev->device;
1427
1428         init_data.asic_id.vram_width = adev->gmc.vram_width;
1429         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1430         init_data.asic_id.atombios_base_address =
1431                 adev->mode_info.atom_context->bios;
1432
1433         init_data.driver = adev;
1434
1435         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1436
1437         if (!adev->dm.cgs_device) {
1438                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1439                 goto error;
1440         }
1441
1442         init_data.cgs_device = adev->dm.cgs_device;
1443
1444         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1445
1446         switch (adev->ip_versions[DCE_HWIP][0]) {
1447         case IP_VERSION(2, 1, 0):
1448                 switch (adev->dm.dmcub_fw_version) {
1449                 case 0: /* development */
1450                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1451                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1452                         init_data.flags.disable_dmcu = false;
1453                         break;
1454                 default:
1455                         init_data.flags.disable_dmcu = true;
1456                 }
1457                 break;
1458         case IP_VERSION(2, 0, 3):
1459                 init_data.flags.disable_dmcu = true;
1460                 break;
1461         default:
1462                 break;
1463         }
1464
1465         switch (adev->asic_type) {
1466         case CHIP_CARRIZO:
1467         case CHIP_STONEY:
1468                 init_data.flags.gpu_vm_support = true;
1469                 break;
1470         default:
1471                 switch (adev->ip_versions[DCE_HWIP][0]) {
1472                 case IP_VERSION(1, 0, 0):
1473                 case IP_VERSION(1, 0, 1):
1474                         /* enable S/G on PCO and RV2 */
1475                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1476                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1477                                 init_data.flags.gpu_vm_support = true;
1478                         break;
1479                 case IP_VERSION(2, 1, 0):
1480                 case IP_VERSION(3, 0, 1):
1481                 case IP_VERSION(3, 1, 2):
1482                 case IP_VERSION(3, 1, 3):
1483                 case IP_VERSION(3, 1, 5):
1484                 case IP_VERSION(3, 1, 6):
1485                         init_data.flags.gpu_vm_support = true;
1486                         break;
1487                 default:
1488                         break;
1489                 }
1490                 break;
1491         }
1492
1493         if (init_data.flags.gpu_vm_support)
1494                 adev->mode_info.gpu_vm_support = true;
1495
1496         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1497                 init_data.flags.fbc_support = true;
1498
1499         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1500                 init_data.flags.multi_mon_pp_mclk_switch = true;
1501
1502         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1503                 init_data.flags.disable_fractional_pwm = true;
1504
1505         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1506                 init_data.flags.edp_no_power_sequencing = true;
1507
1508 #ifdef CONFIG_DRM_AMD_DC_DCN
1509         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1510                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1511         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1512                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1513 #endif
1514
1515         init_data.flags.seamless_boot_edp_requested = false;
1516
1517         if (check_seamless_boot_capability(adev)) {
1518                 init_data.flags.seamless_boot_edp_requested = true;
1519                 init_data.flags.allow_seamless_boot_optimization = true;
1520                 DRM_INFO("Seamless boot condition check passed\n");
1521         }
1522
1523         INIT_LIST_HEAD(&adev->dm.da_list);
1524         /* Display Core create. */
1525         adev->dm.dc = dc_create(&init_data);
1526
1527         if (adev->dm.dc) {
1528                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1529         } else {
1530                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1531                 goto error;
1532         }
1533
1534         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1535                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1536                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1537         }
1538
1539         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1540                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1541         if (dm_should_disable_stutter(adev->pdev))
1542                 adev->dm.dc->debug.disable_stutter = true;
1543
1544         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1545                 adev->dm.dc->debug.disable_stutter = true;
1546
1547         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1548                 adev->dm.dc->debug.disable_dsc = true;
1549                 adev->dm.dc->debug.disable_dsc_edp = true;
1550         }
1551
1552         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1553                 adev->dm.dc->debug.disable_clock_gate = true;
1554
1555         r = dm_dmub_hw_init(adev);
1556         if (r) {
1557                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1558                 goto error;
1559         }
1560
1561         dc_hardware_init(adev->dm.dc);
1562
1563         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1564         if (!adev->dm.hpd_rx_offload_wq) {
1565                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1566                 goto error;
1567         }
1568
1569 #if defined(CONFIG_DRM_AMD_DC_DCN)
1570         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1571                 struct dc_phy_addr_space_config pa_config;
1572
1573                 mmhub_read_system_context(adev, &pa_config);
1574
1575                 // Call the DC init_memory func
1576                 dc_setup_system_context(adev->dm.dc, &pa_config);
1577         }
1578 #endif
1579
1580         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1581         if (!adev->dm.freesync_module)
1582                 DRM_ERROR(
1583                         "amdgpu: failed to initialize freesync_module.\n");
1584         else
1585                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1586                                 adev->dm.freesync_module);
1587
1588         amdgpu_dm_init_color_mod();
1589
1590 #if defined(CONFIG_DRM_AMD_DC_DCN)
1591         if (adev->dm.dc->caps.max_links > 0) {
1592                 adev->dm.vblank_control_workqueue =
1593                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1594                 if (!adev->dm.vblank_control_workqueue)
1595                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1596         }
1597 #endif
1598
1599 #ifdef CONFIG_DRM_AMD_DC_HDCP
1600         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1601                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1602
1603                 if (!adev->dm.hdcp_workqueue)
1604                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1605                 else
1606                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1607
1608                 dc_init_callbacks(adev->dm.dc, &init_params);
1609         }
1610 #endif
1611 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1612         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1613 #endif
1614         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1615                 init_completion(&adev->dm.dmub_aux_transfer_done);
1616                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1617                 if (!adev->dm.dmub_notify) {
1618                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1619                         goto error;
1620                 }
1621
1622                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1623                 if (!adev->dm.delayed_hpd_wq) {
1624                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1625                         goto error;
1626                 }
1627
1628                 amdgpu_dm_outbox_init(adev);
1629 #if defined(CONFIG_DRM_AMD_DC_DCN)
1630                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1631                         dmub_aux_setconfig_callback, false)) {
1632                         DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1633                         goto error;
1634                 }
1635                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1636                         DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1637                         goto error;
1638                 }
1639                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1640                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1641                         goto error;
1642                 }
1643 #endif /* CONFIG_DRM_AMD_DC_DCN */
1644         }
1645
1646         if (amdgpu_dm_initialize_drm_device(adev)) {
1647                 DRM_ERROR(
1648                         "amdgpu: failed to initialize sw for display support.\n");
1649                 goto error;
1650         }
1651
1652         /* create fake encoders for MST */
1653         dm_dp_create_fake_mst_encoders(adev);
1654
1655         /* TODO: Add_display_info? */
1656
1657         /* TODO use dynamic cursor width */
1658         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1659         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1660
1661         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1662                 DRM_ERROR(
1663                         "amdgpu: failed to initialize vblank support.\n");
1664                 goto error;
1665         }
1666
1668         DRM_DEBUG_DRIVER("KMS initialized.\n");
1669
1670         return 0;
1671 error:
1672         amdgpu_dm_fini(adev);
1673
1674         return -EINVAL;
1675 }
1676
1677 static int amdgpu_dm_early_fini(void *handle)
1678 {
1679         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1680
1681         amdgpu_dm_audio_fini(adev);
1682
1683         return 0;
1684 }
1685
1686 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1687 {
1688         int i;
1689
1690 #if defined(CONFIG_DRM_AMD_DC_DCN)
1691         if (adev->dm.vblank_control_workqueue) {
1692                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1693                 adev->dm.vblank_control_workqueue = NULL;
1694         }
1695 #endif
1696
1697         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1698                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1699         }
1700
1701         amdgpu_dm_destroy_drm_device(&adev->dm);
1702
1703 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1704         if (adev->dm.crc_rd_wrk) {
1705                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1706                 kfree(adev->dm.crc_rd_wrk);
1707                 adev->dm.crc_rd_wrk = NULL;
1708         }
1709 #endif
1710 #ifdef CONFIG_DRM_AMD_DC_HDCP
1711         if (adev->dm.hdcp_workqueue) {
1712                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1713                 adev->dm.hdcp_workqueue = NULL;
1714         }
1715
1716         if (adev->dm.dc)
1717                 dc_deinit_callbacks(adev->dm.dc);
1718 #endif
1719
1720         if (adev->dm.dc)
                     dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

1722         if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1723                 kfree(adev->dm.dmub_notify);
1724                 adev->dm.dmub_notify = NULL;
1725                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1726                 adev->dm.delayed_hpd_wq = NULL;
1727         }
1728
1729         if (adev->dm.dmub_bo)
1730                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1731                                       &adev->dm.dmub_bo_gpu_addr,
1732                                       &adev->dm.dmub_bo_cpu_addr);
1733
1734         if (adev->dm.hpd_rx_offload_wq) {
1735                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1736                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1737                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1738                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1739                         }
1740                 }
1741
1742                 kfree(adev->dm.hpd_rx_offload_wq);
1743                 adev->dm.hpd_rx_offload_wq = NULL;
1744         }
1745
1746         /* DC Destroy TODO: Replace destroy DAL */
1747         if (adev->dm.dc)
1748                 dc_destroy(&adev->dm.dc);
1749         /*
1750          * TODO: pageflip, vblank interrupt
1751          *
1752          * amdgpu_dm_irq_fini(adev);
1753          */
1754
1755         if (adev->dm.cgs_device) {
1756                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1757                 adev->dm.cgs_device = NULL;
1758         }
1759         if (adev->dm.freesync_module) {
1760                 mod_freesync_destroy(adev->dm.freesync_module);
1761                 adev->dm.freesync_module = NULL;
1762         }
1763
1764         mutex_destroy(&adev->dm.audio_lock);
1765         mutex_destroy(&adev->dm.dc_lock);
1768 }
1769
1770 static int load_dmcu_fw(struct amdgpu_device *adev)
1771 {
1772         const char *fw_name_dmcu = NULL;
1773         int r;
1774         const struct dmcu_firmware_header_v1_0 *hdr;
1775
1776         switch (adev->asic_type) {
1777 #if defined(CONFIG_DRM_AMD_DC_SI)
1778         case CHIP_TAHITI:
1779         case CHIP_PITCAIRN:
1780         case CHIP_VERDE:
1781         case CHIP_OLAND:
1782 #endif
1783         case CHIP_BONAIRE:
1784         case CHIP_HAWAII:
1785         case CHIP_KAVERI:
1786         case CHIP_KABINI:
1787         case CHIP_MULLINS:
1788         case CHIP_TONGA:
1789         case CHIP_FIJI:
1790         case CHIP_CARRIZO:
1791         case CHIP_STONEY:
1792         case CHIP_POLARIS11:
1793         case CHIP_POLARIS10:
1794         case CHIP_POLARIS12:
1795         case CHIP_VEGAM:
1796         case CHIP_VEGA10:
1797         case CHIP_VEGA12:
1798         case CHIP_VEGA20:
1799                 return 0;
1800         case CHIP_NAVI12:
1801                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1802                 break;
1803         case CHIP_RAVEN:
1804                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1805                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1806                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1807                 else
1808                         return 0;
1809                 break;
1811         default:
1812                 switch (adev->ip_versions[DCE_HWIP][0]) {
1813                 case IP_VERSION(2, 0, 2):
1814                 case IP_VERSION(2, 0, 3):
1815                 case IP_VERSION(2, 0, 0):
1816                 case IP_VERSION(2, 1, 0):
1817                 case IP_VERSION(3, 0, 0):
1818                 case IP_VERSION(3, 0, 2):
1819                 case IP_VERSION(3, 0, 3):
1820                 case IP_VERSION(3, 0, 1):
1821                 case IP_VERSION(3, 1, 2):
1822                 case IP_VERSION(3, 1, 3):
1823                 case IP_VERSION(3, 1, 5):
1824                 case IP_VERSION(3, 1, 6):
1825                         return 0;
1826                 default:
1827                         break;
1828                 }
1829                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1830                 return -EINVAL;
1831         }
1832
1833         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1834                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1835                 return 0;
1836         }
1837
1838         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1839         if (r == -ENOENT) {
1840                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1841                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1842                 adev->dm.fw_dmcu = NULL;
1843                 return 0;
1844         }
1845         if (r) {
1846                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1847                         fw_name_dmcu);
1848                 return r;
1849         }
1850
1851         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1852         if (r) {
1853                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1854                         fw_name_dmcu);
1855                 release_firmware(adev->dm.fw_dmcu);
1856                 adev->dm.fw_dmcu = NULL;
1857                 return r;
1858         }
1859
1860         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1861         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1862         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1863         adev->firmware.fw_size +=
1864                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1865
1866         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1867         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1868         adev->firmware.fw_size +=
1869                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1870
1871         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1872
1873         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1874
1875         return 0;
1876 }
1877
1878 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1879 {
1880         struct amdgpu_device *adev = ctx;
1881
1882         return dm_read_reg(adev->dm.dc->ctx, address);
1883 }
1884
1885 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1886                                      uint32_t value)
1887 {
1888         struct amdgpu_device *adev = ctx;
1889
1890         dm_write_reg(adev->dm.dc->ctx, address, value);
1891 }
1892
1893 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1894 {
1895         struct dmub_srv_create_params create_params;
1896         struct dmub_srv_region_params region_params;
1897         struct dmub_srv_region_info region_info;
1898         struct dmub_srv_fb_params fb_params;
1899         struct dmub_srv_fb_info *fb_info;
1900         struct dmub_srv *dmub_srv;
1901         const struct dmcub_firmware_header_v1_0 *hdr;
1902         const char *fw_name_dmub;
1903         enum dmub_asic dmub_asic;
1904         enum dmub_status status;
1905         int r;
1906
1907         switch (adev->ip_versions[DCE_HWIP][0]) {
1908         case IP_VERSION(2, 1, 0):
1909                 dmub_asic = DMUB_ASIC_DCN21;
1910                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1911                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1912                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1913                 break;
1914         case IP_VERSION(3, 0, 0):
1915                 dmub_asic = DMUB_ASIC_DCN30;
1916                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
1917                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1918                 else
1919                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1920                 break;
1923         case IP_VERSION(3, 0, 1):
1924                 dmub_asic = DMUB_ASIC_DCN301;
1925                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1926                 break;
1927         case IP_VERSION(3, 0, 2):
1928                 dmub_asic = DMUB_ASIC_DCN302;
1929                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1930                 break;
1931         case IP_VERSION(3, 0, 3):
1932                 dmub_asic = DMUB_ASIC_DCN303;
1933                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1934                 break;
1935         case IP_VERSION(3, 1, 2):
1936         case IP_VERSION(3, 1, 3):
1937                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1938                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1939                 break;
1940         case IP_VERSION(3, 1, 5):
1941                 dmub_asic = DMUB_ASIC_DCN315;
1942                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1943                 break;
1944         case IP_VERSION(3, 1, 6):
1945                 dmub_asic = DMUB_ASIC_DCN316;
1946                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1947                 break;
1948         default:
1949                 /* ASIC doesn't support DMUB. */
1950                 return 0;
1951         }
1952
1953         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1954         if (r) {
1955                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
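                     /* Non-fatal: without the firmware, adev->dm.dmub_srv is
                      * never created below and dm_dmub_hw_init() simply treats
                      * DMUB as unsupported on this ASIC.
                      */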
1956                 return 0;
1957         }
1958
1959         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1960         if (r) {
1961                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1962                 return 0;
1963         }
1964
1965         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1966         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1967
1968         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1969                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1970                         AMDGPU_UCODE_ID_DMCUB;
1971                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1972                         adev->dm.dmub_fw;
1973                 adev->firmware.fw_size +=
1974                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1975
1976                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1977                          adev->dm.dmcub_fw_version);
1978         }
1979
1981         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1982         dmub_srv = adev->dm.dmub_srv;
1983
1984         if (!dmub_srv) {
1985                 DRM_ERROR("Failed to allocate DMUB service!\n");
1986                 return -ENOMEM;
1987         }
1988
1989         memset(&create_params, 0, sizeof(create_params));
1990         create_params.user_ctx = adev;
1991         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1992         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1993         create_params.asic = dmub_asic;
1994
1995         /* Create the DMUB service. */
1996         status = dmub_srv_create(dmub_srv, &create_params);
1997         if (status != DMUB_STATUS_OK) {
1998                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1999                 return -EINVAL;
2000         }
2001
2002         /* Calculate the size of all the regions for the DMUB service. */
2003         memset(&region_params, 0, sizeof(region_params));
2004
2005         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2006                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2007         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2008         region_params.vbios_size = adev->bios_size;
2009         region_params.fw_bss_data = region_params.bss_data_size ?
2010                 adev->dm.dmub_fw->data +
2011                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2012                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2013         region_params.fw_inst_const =
2014                 adev->dm.dmub_fw->data +
2015                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2016                 PSP_HEADER_BYTES;
2017
2018         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2019                                            &region_info);
2020
2021         if (status != DMUB_STATUS_OK) {
2022                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2023                 return -EINVAL;
2024         }
2025
2026         /*
2027          * Allocate a framebuffer based on the total size of all the regions.
2028          * TODO: Move this into GART.
2029          */
2030         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2031                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2032                                     &adev->dm.dmub_bo_gpu_addr,
2033                                     &adev->dm.dmub_bo_cpu_addr);
2034         if (r)
2035                 return r;
2036
2037         /* Rebase the regions on the framebuffer address. */
2038         memset(&fb_params, 0, sizeof(fb_params));
2039         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2040         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2041         fb_params.region_info = &region_info;
2042
2043         adev->dm.dmub_fb_info =
2044                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2045         fb_info = adev->dm.dmub_fb_info;
2046
2047         if (!fb_info) {
2048                 DRM_ERROR(
2049                         "Failed to allocate framebuffer info for DMUB service!\n");
2050                 return -ENOMEM;
2051         }
2052
2053         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2054         if (status != DMUB_STATUS_OK) {
2055                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2056                 return -EINVAL;
2057         }
2058
2059         return 0;
2060 }
2061
2062 static int dm_sw_init(void *handle)
2063 {
2064         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2065         int r;
2066
2067         r = dm_dmub_sw_init(adev);
2068         if (r)
2069                 return r;
2070
2071         return load_dmcu_fw(adev);
2072 }
2073
2074 static int dm_sw_fini(void *handle)
2075 {
2076         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2077
2078         kfree(adev->dm.dmub_fb_info);
2079         adev->dm.dmub_fb_info = NULL;
2080
2081         if (adev->dm.dmub_srv) {
2082                 dmub_srv_destroy(adev->dm.dmub_srv);
2083                 adev->dm.dmub_srv = NULL;
2084         }
2085
2086         release_firmware(adev->dm.dmub_fw);
2087         adev->dm.dmub_fw = NULL;
2088
2089         release_firmware(adev->dm.fw_dmcu);
2090         adev->dm.fw_dmcu = NULL;
2091
2092         return 0;
2093 }
2094
2095 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2096 {
2097         struct amdgpu_dm_connector *aconnector;
2098         struct drm_connector *connector;
2099         struct drm_connector_list_iter iter;
2100         int ret = 0;
2101
2102         drm_connector_list_iter_begin(dev, &iter);
2103         drm_for_each_connector_iter(connector, &iter) {
2104                 aconnector = to_amdgpu_dm_connector(connector);
2105                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2106                     aconnector->mst_mgr.aux) {
2107                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2108                                          aconnector,
2109                                          aconnector->base.base.id);
2110
2111                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2112                         if (ret < 0) {
2113                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2114                                 aconnector->dc_link->type =
2115                                         dc_connection_single;
2116                                 break;
2117                         }
2118                 }
2119         }
2120         drm_connector_list_iter_end(&iter);
2121
2122         return ret;
2123 }
2124
2125 static int dm_late_init(void *handle)
2126 {
2127         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2128
2129         struct dmcu_iram_parameters params;
2130         unsigned int linear_lut[16];
2131         int i;
2132         struct dmcu *dmcu = NULL;
2133
2134         dmcu = adev->dm.dc->res_pool->dmcu;
2135
2136         for (i = 0; i < 16; i++)
2137                 linear_lut[i] = 0xFFFF * i / 15;
2138
2139         params.set = 0;
2140         params.backlight_ramping_override = false;
2141         params.backlight_ramping_start = 0xCCCC;
2142         params.backlight_ramping_reduction = 0xCCCCCCCC;
2143         params.backlight_lut_array_size = 16;
2144         params.backlight_lut_array = linear_lut;
2145
2146         /* Min backlight level after ABM reduction; don't allow below 1%:
2147          * 0xFFFF * 0.01 = 0x28F
2148          */
2149         params.min_abm_backlight = 0x28F;
2150         /* In the case where ABM is implemented on DMCUB,
2151          * the DMCU object will be NULL.
2152          * ABM 2.4 and up are implemented on DMCUB.
2153          */
2154         if (dmcu) {
2155                 if (!dmcu_load_iram(dmcu, params))
2156                         return -EINVAL;
2157         } else if (adev->dm.dc->ctx->dmub_srv) {
2158                 struct dc_link *edp_links[MAX_NUM_EDP];
2159                 int edp_num;
2160
2161                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2162                 for (i = 0; i < edp_num; i++) {
2163                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2164                                 return -EINVAL;
2165                 }
2166         }
2167
2168         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2169 }
2170
2171 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2172 {
2173         struct amdgpu_dm_connector *aconnector;
2174         struct drm_connector *connector;
2175         struct drm_connector_list_iter iter;
2176         struct drm_dp_mst_topology_mgr *mgr;
2177         int ret;
2178         bool need_hotplug = false;
2179
2180         drm_connector_list_iter_begin(dev, &iter);
2181         drm_for_each_connector_iter(connector, &iter) {
2182                 aconnector = to_amdgpu_dm_connector(connector);
2183                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2184                     aconnector->mst_port)
2185                         continue;
2186
2187                 mgr = &aconnector->mst_mgr;
2188
2189                 if (suspend) {
2190                         drm_dp_mst_topology_mgr_suspend(mgr);
2191                 } else {
2192                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2193                         if (ret < 0) {
2194                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2195                                 need_hotplug = true;
2196                         }
2197                 }
2198         }
2199         drm_connector_list_iter_end(&iter);
2200
2201         if (need_hotplug)
2202                 drm_kms_helper_hotplug_event(dev);
2203 }
2204
2205 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2206 {
2207         int ret = 0;
2208
2209         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2210          * depends on the Windows driver's dc implementation.
2211          * For Navi1x, the clock settings of the dcn watermarks are fixed and
2212          * should be passed to smu during boot up and on resume from s3.
2213          * Boot up: dc calculates the dcn watermark clock settings within
2214          * dc_create and dcn20_resource_construct, then calls the pplib
2215          * functions below to pass the settings to smu:
2216          * smu_set_watermarks_for_clock_ranges
2217          * smu_set_watermarks_table
2218          * navi10_set_watermarks_table
2219          * smu_write_watermarks_table
2220          *
2221          * For Renoir, the clock settings of the dcn watermarks are also fixed
2222          * values. dc implements a different flow for the Windows driver:
2223          * dc_hardware_init / dc_set_power_state
2224          * dcn10_init_hw
2225          * notify_wm_ranges
2226          * set_wm_ranges
2227          * -- Linux
2228          * smu_set_watermarks_for_clock_ranges
2229          * renoir_set_watermarks_table
2230          * smu_write_watermarks_table
2231          *
2232          * For Linux,
2233          * dc_hardware_init -> amdgpu_dm_init
2234          * dc_set_power_state --> dm_resume
2235          *
2236          * Therefore, this function applies to navi10/12/14 but not to Renoir.
2237          */
2239         switch (adev->ip_versions[DCE_HWIP][0]) {
2240         case IP_VERSION(2, 0, 2):
2241         case IP_VERSION(2, 0, 0):
2242                 break;
2243         default:
2244                 return 0;
2245         }
2246
2247         ret = amdgpu_dpm_write_watermarks_table(adev);
2248         if (ret) {
2249                 DRM_ERROR("Failed to update WMTABLE!\n");
2250                 return ret;
2251         }
2252
2253         return 0;
2254 }
2255
2256 /**
2257  * dm_hw_init() - Initialize DC device
2258  * @handle: The base driver device containing the amdgpu_dm device.
2259  *
2260  * Initialize the &struct amdgpu_display_manager device. This involves calling
2261  * the initializers of each DM component, then populating the struct with them.
2262  *
2263  * Although the function implies hardware initialization, both hardware and
2264  * software are initialized here. Splitting them out to their relevant init
2265  * hooks is a future TODO item.
2266  *
2267  * Some notable things that are initialized here:
2268  *
2269  * - Display Core, both software and hardware
2270  * - DC modules that we need (freesync and color management)
2271  * - DRM software states
2272  * - Interrupt sources and handlers
2273  * - Vblank support
2274  * - Debug FS entries, if enabled
2275  */
2276 static int dm_hw_init(void *handle)
2277 {
2278         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2279         /* Create DAL display manager */
2280         amdgpu_dm_init(adev);
2281         amdgpu_dm_hpd_init(adev);
2282
2283         return 0;
2284 }
2285
2286 /**
2287  * dm_hw_fini() - Teardown DC device
2288  * @handle: The base driver device containing the amdgpu_dm device.
2289  *
2290  * Teardown components within &struct amdgpu_display_manager that require
2291  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2292  * were loaded. Also flush IRQ workqueues and disable them.
2293  */
2294 static int dm_hw_fini(void *handle)
2295 {
2296         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2297
2298         amdgpu_dm_hpd_fini(adev);
2299
2300         amdgpu_dm_irq_fini(adev);
2301         amdgpu_dm_fini(adev);
2302         return 0;
2303 }
2304
2306 static int dm_enable_vblank(struct drm_crtc *crtc);
2307 static void dm_disable_vblank(struct drm_crtc *crtc);
2308
2309 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2310                                  struct dc_state *state, bool enable)
2311 {
2312         enum dc_irq_source irq_source;
2313         struct amdgpu_crtc *acrtc;
2314         int rc = -EBUSY;
2315         int i = 0;
2316
2317         for (i = 0; i < state->stream_count; i++) {
2318                 acrtc = get_crtc_by_otg_inst(
2319                                 adev, state->stream_status[i].primary_otg_inst);
2320
2321                 if (acrtc && state->stream_status[i].plane_count != 0) {
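                             /* Pageflip IRQ sources are laid out per OTG instance. */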
2322                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2323                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2324                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2325                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2326                         if (rc)
2327                                 DRM_WARN("Failed to %s pflip interrupts\n",
2328                                          enable ? "enable" : "disable");
2329
2330                         if (enable) {
2331                                 rc = dm_enable_vblank(&acrtc->base);
2332                                 if (rc)
2333                                         DRM_WARN("Failed to enable vblank interrupts\n");
2334                         } else {
2335                                 dm_disable_vblank(&acrtc->base);
2336                         }
2337
2338                 }
2339         }
2341 }
2342
2343 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2344 {
2345         struct dc_state *context = NULL;
2346         enum dc_status res = DC_ERROR_UNEXPECTED;
2347         int i;
2348         struct dc_stream_state *del_streams[MAX_PIPES];
2349         int del_streams_count = 0;
2350
2351         memset(del_streams, 0, sizeof(del_streams));
2352
2353         context = dc_create_state(dc);
2354         if (context == NULL)
2355                 goto context_alloc_fail;
2356
2357         dc_resource_state_copy_construct_current(dc, context);
2358
2359         /* First remove from context all streams */
2360         for (i = 0; i < context->stream_count; i++) {
2361                 struct dc_stream_state *stream = context->streams[i];
2362
2363                 del_streams[del_streams_count++] = stream;
2364         }
2365
2366         /* Remove all planes for removed streams and then remove the streams */
2367         for (i = 0; i < del_streams_count; i++) {
2368                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2369                         res = DC_FAIL_DETACH_SURFACES;
2370                         goto fail;
2371                 }
2372
2373                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2374                 if (res != DC_OK)
2375                         goto fail;
2376         }
2377
2378         res = dc_commit_state(dc, context);
2379
2380 fail:
2381         dc_release_state(context);
2382
2383 context_alloc_fail:
2384         return res;
2385 }
2386
2387 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2388 {
2389         int i;
2390
2391         if (dm->hpd_rx_offload_wq) {
2392                 for (i = 0; i < dm->dc->caps.max_links; i++)
2393                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2394         }
2395 }
2396
2397 static int dm_suspend(void *handle)
2398 {
2399         struct amdgpu_device *adev = handle;
2400         struct amdgpu_display_manager *dm = &adev->dm;
2401         int ret = 0;
2402
2403         if (amdgpu_in_reset(adev)) {
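                     /* Note: dc_lock is intentionally held across the GPU reset;
                      * the matching unlock is in the reset path of dm_resume().
                      */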
2404                 mutex_lock(&dm->dc_lock);
2405
2406 #if defined(CONFIG_DRM_AMD_DC_DCN)
2407                 dc_allow_idle_optimizations(adev->dm.dc, false);
2408 #endif
2409
2410                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2411
2412                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2413
2414                 amdgpu_dm_commit_zero_streams(dm->dc);
2415
2416                 amdgpu_dm_irq_suspend(adev);
2417
2418                 hpd_rx_irq_work_suspend(dm);
2419
2420                 return ret;
2421         }
2422
2423         WARN_ON(adev->dm.cached_state);
2424         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2425
2426         s3_handle_mst(adev_to_drm(adev), true);
2427
2428         amdgpu_dm_irq_suspend(adev);
2429
2430         hpd_rx_irq_work_suspend(dm);
2431
2432         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2433
2434         return 0;
2435 }
2436
2437 struct amdgpu_dm_connector *
2438 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2439                                              struct drm_crtc *crtc)
2440 {
2441         uint32_t i;
2442         struct drm_connector_state *new_con_state;
2443         struct drm_connector *connector;
2444         struct drm_crtc *crtc_from_state;
2445
2446         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2447                 crtc_from_state = new_con_state->crtc;
2448
2449                 if (crtc_from_state == crtc)
2450                         return to_amdgpu_dm_connector(connector);
2451         }
2452
2453         return NULL;
2454 }
2455
2456 static void emulated_link_detect(struct dc_link *link)
2457 {
2458         struct dc_sink_init_data sink_init_data = { 0 };
2459         struct display_sink_capability sink_caps = { 0 };
2460         enum dc_edid_status edid_status;
2461         struct dc_context *dc_ctx = link->ctx;
2462         struct dc_sink *sink = NULL;
2463         struct dc_sink *prev_sink = NULL;
2464
2465         link->type = dc_connection_none;
2466         prev_sink = link->local_sink;
2467
2468         if (prev_sink)
2469                 dc_sink_release(prev_sink);
2470
2471         switch (link->connector_signal) {
2472         case SIGNAL_TYPE_HDMI_TYPE_A: {
2473                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2474                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2475                 break;
2476         }
2477
2478         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2479                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2480                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2481                 break;
2482         }
2483
2484         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2485                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2486                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2487                 break;
2488         }
2489
2490         case SIGNAL_TYPE_LVDS: {
2491                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2492                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2493                 break;
2494         }
2495
2496         case SIGNAL_TYPE_EDP: {
2497                 sink_caps.transaction_type =
2498                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2499                 sink_caps.signal = SIGNAL_TYPE_EDP;
2500                 break;
2501         }
2502
2503         case SIGNAL_TYPE_DISPLAY_PORT: {
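                     /* With no physical sink attached, the emulated DP sink is
                      * reported with a virtual signal type.
                      */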
2504                 sink_caps.transaction_type =
2505                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2506                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2507                 break;
2508         }
2509
2510         default:
2511                 DC_ERROR("Invalid connector type! signal:%d\n",
2512                         link->connector_signal);
2513                 return;
2514         }
2515
2516         sink_init_data.link = link;
2517         sink_init_data.sink_signal = sink_caps.signal;
2518
2519         sink = dc_sink_create(&sink_init_data);
2520         if (!sink) {
2521                 DC_ERROR("Failed to create sink!\n");
2522                 return;
2523         }
2524
2525         /* dc_sink_create returns a new reference */
2526         link->local_sink = sink;
2527
2528         edid_status = dm_helpers_read_local_edid(
2529                         link->ctx,
2530                         link,
2531                         sink);
2532
2533         if (edid_status != EDID_OK)
2534                 DC_ERROR("Failed to read EDID\n");
2535
2536 }
2537
2538 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2539                                      struct amdgpu_display_manager *dm)
2540 {
2541         struct {
2542                 struct dc_surface_update surface_updates[MAX_SURFACES];
2543                 struct dc_plane_info plane_infos[MAX_SURFACES];
2544                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2545                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2546                 struct dc_stream_update stream_update;
2547         } *bundle;
2548         int k, m;
2549
2550         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2551
2552         if (!bundle) {
2553                 dm_error("Failed to allocate update bundle\n");
2554                 goto cleanup;
2555         }
2556
2557         for (k = 0; k < dc_state->stream_count; k++) {
2558                 bundle->stream_update.stream = dc_state->streams[k];
2559
2560                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2561                         bundle->surface_updates[m].surface =
2562                                 dc_state->stream_status->plane_states[m];
2563                         bundle->surface_updates[m].surface->force_full_update =
2564                                 true;
2565                 }
2566                 dc_commit_updates_for_stream(
2567                         dm->dc, bundle->surface_updates,
2568                         dc_state->stream_status->plane_count,
2569                         dc_state->streams[k], &bundle->stream_update, dc_state);
2570         }
2571
2572 cleanup:
2573         kfree(bundle);
2576 }
2577
2578 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2579 {
2580         struct dc_stream_state *stream_state;
2581         struct amdgpu_dm_connector *aconnector = link->priv;
2582         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2583         struct dc_stream_update stream_update;
2584         bool dpms_off = true;
2585
2586         memset(&stream_update, 0, sizeof(stream_update));
2587         stream_update.dpms_off = &dpms_off;
2588
2589         mutex_lock(&adev->dm.dc_lock);
2590         stream_state = dc_stream_find_from_link(link);
2591
2592         if (stream_state == NULL) {
2593                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2594                 mutex_unlock(&adev->dm.dc_lock);
2595                 return;
2596         }
2597
2598         stream_update.stream = stream_state;
2599         acrtc_state->force_dpms_off = true;
2600         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2601                                      stream_state, &stream_update,
2602                                      stream_state->ctx->dc->current_state);
2603         mutex_unlock(&adev->dm.dc_lock);
2604 }
2605
2606 static int dm_resume(void *handle)
2607 {
2608         struct amdgpu_device *adev = handle;
2609         struct drm_device *ddev = adev_to_drm(adev);
2610         struct amdgpu_display_manager *dm = &adev->dm;
2611         struct amdgpu_dm_connector *aconnector;
2612         struct drm_connector *connector;
2613         struct drm_connector_list_iter iter;
2614         struct drm_crtc *crtc;
2615         struct drm_crtc_state *new_crtc_state;
2616         struct dm_crtc_state *dm_new_crtc_state;
2617         struct drm_plane *plane;
2618         struct drm_plane_state *new_plane_state;
2619         struct dm_plane_state *dm_new_plane_state;
2620         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2621         enum dc_connection_type new_connection_type = dc_connection_none;
2622         struct dc_state *dc_state;
2623         int i, r, j;
2624
2625         if (amdgpu_in_reset(adev)) {
2626                 dc_state = dm->cached_dc_state;
2627
2628                 /*
2629                  * The dc->current_state is backed up into dm->cached_dc_state
2630                  * before we commit 0 streams.
2631                  *
2632                  * DC will clear link encoder assignments on the real state
2633                  * but the changes won't propagate over to the copy we made
2634                  * before the 0 streams commit.
2635                  *
2636                  * DC expects that link encoder assignments are *not* valid
2637                  * when committing a state, so as a workaround we can copy
2638                  * off of the current state.
2639                  *
2640                  * We lose the previous assignments, but we had already
2641                  * committed 0 streams anyway.
2642                  */
2643                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2644
2645                 if (dc_enable_dmub_notifications(adev->dm.dc))
2646                         amdgpu_dm_outbox_init(adev);
2647
2648                 r = dm_dmub_hw_init(adev);
2649                 if (r)
2650                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2651
2652                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2653                 dc_resume(dm->dc);
2654
2655                 amdgpu_dm_irq_resume_early(adev);
2656
2657                 for (i = 0; i < dc_state->stream_count; i++) {
2658                         dc_state->streams[i]->mode_changed = true;
2659                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2660                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2661                                         = 0xffffffff;
2662                         }
2663                 }
2664
2665                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2666
2667                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2668
2669                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2670
2671                 dc_release_state(dm->cached_dc_state);
2672                 dm->cached_dc_state = NULL;
2673
2674                 amdgpu_dm_irq_resume_late(adev);
2675
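                     /* Pairs with the dc_lock taken in dm_suspend() during reset. */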
2676                 mutex_unlock(&dm->dc_lock);
2677
2678                 return 0;
2679         }
2680         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2681         dc_release_state(dm_state->context);
2682         dm_state->context = dc_create_state(dm->dc);
2683         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2684         dc_resource_state_construct(dm->dc, dm_state->context);
2685
2686         /* Re-enable outbox interrupts for DPIA. */
2687         if (dc_enable_dmub_notifications(adev->dm.dc))
2688                 amdgpu_dm_outbox_init(adev);
2689
2690         /* Before powering on DC we need to re-initialize DMUB. */
2691         dm_dmub_hw_resume(adev);
2692
2693         /* power on hardware */
2694         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2695
2696         /* program HPD filter */
2697         dc_resume(dm->dc);
2698
2699         /*
2700          * Enable HPD Rx IRQ early; this should be done before setting the
2701          * mode, as short-pulse interrupts are used for MST.
2702          */
2703         amdgpu_dm_irq_resume_early(adev);
2704
2705         /* On resume we need to rewrite the MSTM control bits to enable MST */
2706         s3_handle_mst(ddev, false);
2707
2708         /* Do detection */
2709         drm_connector_list_iter_begin(ddev, &iter);
2710         drm_for_each_connector_iter(connector, &iter) {
2711                 aconnector = to_amdgpu_dm_connector(connector);
2712
2713                 /*
2714                  * This is the case when traversing through already-created
2715                  * MST connectors; they should be skipped.
2716                  */
2717                 if (aconnector->mst_port)
2718                         continue;
2719
2720                 mutex_lock(&aconnector->hpd_lock);
2721                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2722                         DRM_ERROR("KMS: Failed to detect connector\n");
2723
2724                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2725                         emulated_link_detect(aconnector->dc_link);
2726                 else
2727                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2728
2729                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2730                         aconnector->fake_enable = false;
2731
2732                 if (aconnector->dc_sink)
2733                         dc_sink_release(aconnector->dc_sink);
2734                 aconnector->dc_sink = NULL;
2735                 amdgpu_dm_update_connector_after_detect(aconnector);
2736                 mutex_unlock(&aconnector->hpd_lock);
2737         }
2738         drm_connector_list_iter_end(&iter);
2739
2740         /* Force mode set in atomic commit */
2741         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2742                 new_crtc_state->active_changed = true;
2743
2744         /*
2745          * atomic_check is expected to create the dc states. We need to release
2746          * them here, since they were duplicated as part of the suspend
2747          * procedure.
2748          */
2749         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2750                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2751                 if (dm_new_crtc_state->stream) {
2752                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2753                         dc_stream_release(dm_new_crtc_state->stream);
2754                         dm_new_crtc_state->stream = NULL;
2755                 }
2756         }
2757
2758         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2759                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2760                 if (dm_new_plane_state->dc_state) {
2761                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2762                         dc_plane_state_release(dm_new_plane_state->dc_state);
2763                         dm_new_plane_state->dc_state = NULL;
2764                 }
2765         }
2766
2767         drm_atomic_helper_resume(ddev, dm->cached_state);
2768
2769         dm->cached_state = NULL;
2770
2771         amdgpu_dm_irq_resume_late(adev);
2772
2773         amdgpu_dm_smu_write_watermarks_table(adev);
2774
2775         return 0;
2776 }
2777
2778 /**
2779  * DOC: DM Lifecycle
2780  *
2781  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2782  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2783  * the base driver's device list to be initialized and torn down accordingly.
2784  *
2785  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2786  */
2787
2788 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2789         .name = "dm",
2790         .early_init = dm_early_init,
2791         .late_init = dm_late_init,
2792         .sw_init = dm_sw_init,
2793         .sw_fini = dm_sw_fini,
2794         .early_fini = amdgpu_dm_early_fini,
2795         .hw_init = dm_hw_init,
2796         .hw_fini = dm_hw_fini,
2797         .suspend = dm_suspend,
2798         .resume = dm_resume,
2799         .is_idle = dm_is_idle,
2800         .wait_for_idle = dm_wait_for_idle,
2801         .check_soft_reset = dm_check_soft_reset,
2802         .soft_reset = dm_soft_reset,
2803         .set_clockgating_state = dm_set_clockgating_state,
2804         .set_powergating_state = dm_set_powergating_state,
2805 };
2806
2807 const struct amdgpu_ip_block_version dm_ip_block =
2808 {
2809         .type = AMD_IP_BLOCK_TYPE_DCE,
2810         .major = 1,
2811         .minor = 0,
2812         .rev = 0,
2813         .funcs = &amdgpu_dm_funcs,
2814 };
2815
2816
2817 /**
2818  * DOC: atomic
2819  *
2820  * *WIP*
2821  */
2822
2823 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2824         .fb_create = amdgpu_display_user_framebuffer_create,
2825         .get_format_info = amd_get_format_info,
2826         .output_poll_changed = drm_fb_helper_output_poll_changed,
2827         .atomic_check = amdgpu_dm_atomic_check,
2828         .atomic_commit = drm_atomic_helper_commit,
2829 };
2830
2831 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2832         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2833 };
2834
2835 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2836 {
2837         u32 max_cll, min_cll, max, min, q, r;
2838         struct amdgpu_dm_backlight_caps *caps;
2839         struct amdgpu_display_manager *dm;
2840         struct drm_connector *conn_base;
2841         struct amdgpu_device *adev;
2842         struct dc_link *link = NULL;
2843         static const u8 pre_computed_values[] = {
2844                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2845                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2846         int i;
2847
2848         if (!aconnector || !aconnector->dc_link)
2849                 return;
2850
2851         link = aconnector->dc_link;
2852         if (link->connector_signal != SIGNAL_TYPE_EDP)
2853                 return;
2854
2855         conn_base = &aconnector->base;
2856         adev = drm_to_adev(conn_base->dev);
2857         dm = &adev->dm;
2858         for (i = 0; i < dm->num_of_edps; i++) {
2859                 if (link == dm->backlight_link[i])
2860                         break;
2861         }
2862         if (i >= dm->num_of_edps)
2863                 return;
2864         caps = &dm->backlight_caps[i];
2865         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2866         caps->aux_support = false;
2867         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2868         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2869
2870         if (caps->ext_caps->bits.oled == 1 /*||
2871             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2872             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2873                 caps->aux_support = true;
2874
2875         if (amdgpu_backlight == 0)
2876                 caps->aux_support = false;
2877         else if (amdgpu_backlight == 1)
2878                 caps->aux_support = true;
2879
2880         /* From the specification (CTA-861-G), the maximum luminance is
2881          * calculated as:
2882          *      Luminance = 50*2**(CV/32)
2883          * where CV is a one-byte value.
2884          * Evaluating this expression directly would require floating-point
2885          * precision; to avoid that complexity, we take advantage of the fact
2886          * that CV is divided by a constant. From Euclid's division algorithm,
2887          * we know that CV can be written as CV = 32*q + r. Substituting this
2888          * into the Luminance expression gives 50*(2**q)*(2**(r/32)), so we
2889          * only need to pre-compute the value of 50*2**(r/32) for each r in
2890          * 0..31. The values were generated with the following Ruby line:
2891          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2892          * The results of the above expression can be verified against
2893          * pre_computed_values.
2894          */
2895         q = max_cll >> 5;
2896         r = max_cll % 32;
2897         max = (1 << q) * pre_computed_values[r];
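        /*
         * Worked example (illustrative, not from the original source): for
         * max_cll = 70, q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
         * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, matching
         * round(50 * 2**(70/32.0)) = 228 nits.
         */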
2898
2899         // min luminance: maxLum * (CV/255)^2 / 100
2900         q = DIV_ROUND_CLOSEST(min_cll, 255);
2901         min = max * DIV_ROUND_CLOSEST((q * q), 100);
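        /*
         * Note on the integer arithmetic above: DIV_ROUND_CLOSEST(min_cll, 255)
         * is 0 for min_cll < 128 and 1 otherwise, so DIV_ROUND_CLOSEST(q * q, 100)
         * evaluates to 0 and min ends up as 0 for any one-byte min_cll value.
         */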
2902
2903         caps->aux_max_input_signal = max;
2904         caps->aux_min_input_signal = min;
2905 }
2906
2907 void amdgpu_dm_update_connector_after_detect(
2908                 struct amdgpu_dm_connector *aconnector)
2909 {
2910         struct drm_connector *connector = &aconnector->base;
2911         struct drm_device *dev = connector->dev;
2912         struct dc_sink *sink;
2913
2914         /* MST handled by drm_mst framework */
2915         if (aconnector->mst_mgr.mst_state)
2916                 return;
2917
2918         sink = aconnector->dc_link->local_sink;
2919         if (sink)
2920                 dc_sink_retain(sink);
2921
2922         /*
2923          * An EDID-managed connector gets its first update only in the mode_valid
2924          * hook; after that the connector sink is set to either the fake or the
2925          * physical sink, depending on link status. Skip if already done during boot.
2926          */
2927         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2928                         && aconnector->dc_em_sink) {
2929
2930                 /*
2931                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2932                  * fake a stream, because connector->sink is set to NULL on resume.
2933                  */
2934                 mutex_lock(&dev->mode_config.mutex);
2935
2936                 if (sink) {
2937                         if (aconnector->dc_sink) {
2938                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2939                                 /*
2940                                  * The retain and release below bump up the sink's
2941                                  * refcount because the link no longer points to it
2942                                  * after disconnect; otherwise the next crtc-to-connector
2943                                  * reshuffle by the UMD would trigger an unwanted dc_sink release.
2944                                  */
2945                                 dc_sink_release(aconnector->dc_sink);
2946                         }
2947                         aconnector->dc_sink = sink;
2948                         dc_sink_retain(aconnector->dc_sink);
2949                         amdgpu_dm_update_freesync_caps(connector,
2950                                         aconnector->edid);
2951                 } else {
2952                         amdgpu_dm_update_freesync_caps(connector, NULL);
2953                         if (!aconnector->dc_sink) {
2954                                 aconnector->dc_sink = aconnector->dc_em_sink;
2955                                 dc_sink_retain(aconnector->dc_sink);
2956                         }
2957                 }
2958
2959                 mutex_unlock(&dev->mode_config.mutex);
2960
2961                 if (sink)
2962                         dc_sink_release(sink);
2963                 return;
2964         }
2965
2966         /*
2967          * TODO: temporary guard while looking for a proper fix;
2968          * if this sink is an MST sink, we should not do anything.
2969          */
2970         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2971                 dc_sink_release(sink);
2972                 return;
2973         }
2974
2975         if (aconnector->dc_sink == sink) {
2976                 /*
2977                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2978                  * Do nothing!!
2979                  */
2980                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2981                                 aconnector->connector_id);
2982                 if (sink)
2983                         dc_sink_release(sink);
2984                 return;
2985         }
2986
2987         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2988                 aconnector->connector_id, aconnector->dc_sink, sink);
2989
2990         mutex_lock(&dev->mode_config.mutex);
2991
2992         /*
2993          * 1. Update status of the drm connector
2994          * 2. Send an event and let userspace tell us what to do
2995          */
2996         if (sink) {
2997                 /*
2998                  * TODO: check if we still need the S3 mode update workaround.
2999                  * If yes, put it here.
3000                  */
3001                 if (aconnector->dc_sink) {
3002                         amdgpu_dm_update_freesync_caps(connector, NULL);
3003                         dc_sink_release(aconnector->dc_sink);
3004                 }
3005
3006                 aconnector->dc_sink = sink;
3007                 dc_sink_retain(aconnector->dc_sink);
3008                 if (sink->dc_edid.length == 0) {
3009                         aconnector->edid = NULL;
3010                         if (aconnector->dc_link->aux_mode) {
3011                                 drm_dp_cec_unset_edid(
3012                                         &aconnector->dm_dp_aux.aux);
3013                         }
3014                 } else {
3015                         aconnector->edid =
3016                                 (struct edid *)sink->dc_edid.raw_edid;
3017
3018                         if (aconnector->dc_link->aux_mode)
3019                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3020                                                     aconnector->edid);
3021                 }
3022
3023                 drm_connector_update_edid_property(connector, aconnector->edid);
3024                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3025                 update_connector_ext_caps(aconnector);
3026         } else {
3027                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3028                 amdgpu_dm_update_freesync_caps(connector, NULL);
3029                 drm_connector_update_edid_property(connector, NULL);
3030                 aconnector->num_modes = 0;
3031                 dc_sink_release(aconnector->dc_sink);
3032                 aconnector->dc_sink = NULL;
3033                 aconnector->edid = NULL;
3034 #ifdef CONFIG_DRM_AMD_DC_HDCP
3035                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
3036                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3037                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3038 #endif
3039         }
3040
3041         mutex_unlock(&dev->mode_config.mutex);
3042
3043         update_subconnector_property(aconnector);
3044
3045         if (sink)
3046                 dc_sink_release(sink);
3047 }
3048
3049 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3050 {
3051         struct drm_connector *connector = &aconnector->base;
3052         struct drm_device *dev = connector->dev;
3053         enum dc_connection_type new_connection_type = dc_connection_none;
3054         struct amdgpu_device *adev = drm_to_adev(dev);
3055         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3056         struct dm_crtc_state *dm_crtc_state = NULL;
3057
3058         if (adev->dm.disable_hpd_irq)
3059                 return;
3060
3061         if (dm_con_state->base.state && dm_con_state->base.crtc)
3062                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3063                                         dm_con_state->base.state,
3064                                         dm_con_state->base.crtc));
3065         /*
3066          * In case of failure, or for MST, there is no need to update the connector
3067          * status or notify the OS, since MST handles this in its own context.
3068          */
3069         mutex_lock(&aconnector->hpd_lock);
3070
3071 #ifdef CONFIG_DRM_AMD_DC_HDCP
3072         if (adev->dm.hdcp_workqueue) {
3073                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3074                 dm_con_state->update_hdcp = true;
3075         }
3076 #endif
3077         if (aconnector->fake_enable)
3078                 aconnector->fake_enable = false;
3079
3080         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3081                 DRM_ERROR("KMS: Failed to detect connector\n");
3082
3083         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3084                 emulated_link_detect(aconnector->dc_link);
3085
3086                 drm_modeset_lock_all(dev);
3087                 dm_restore_drm_connector_state(dev, connector);
3088                 drm_modeset_unlock_all(dev);
3089
3090                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3091                         drm_kms_helper_connector_hotplug_event(connector);
3092
3093         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3094                 if (new_connection_type == dc_connection_none &&
3095                     aconnector->dc_link->type == dc_connection_none &&
3096                     dm_crtc_state)
3097                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3098
3099                 amdgpu_dm_update_connector_after_detect(aconnector);
3100
3101                 drm_modeset_lock_all(dev);
3102                 dm_restore_drm_connector_state(dev, connector);
3103                 drm_modeset_unlock_all(dev);
3104
3105                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3106                         drm_kms_helper_connector_hotplug_event(connector);
3107         }
3108         mutex_unlock(&aconnector->hpd_lock);
3109
3110 }
3111
3112 static void handle_hpd_irq(void *param)
3113 {
3114         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3115
3116         handle_hpd_irq_helper(aconnector);
3117
3118 }
3119
3120 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3121 {
3122         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3123         uint8_t dret;
3124         bool new_irq_handled = false;
3125         int dpcd_addr;
3126         int dpcd_bytes_to_read;
3127
3128         const int max_process_count = 30;
3129         int process_count = 0;
3130
3131         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3132
3133         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3134                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3135                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3136                 dpcd_addr = DP_SINK_COUNT;
3137         } else {
3138                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3139                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3140                 dpcd_addr = DP_SINK_COUNT_ESI;
3141         }
3142
3143         dret = drm_dp_dpcd_read(
3144                 &aconnector->dm_dp_aux.aux,
3145                 dpcd_addr,
3146                 esi,
3147                 dpcd_bytes_to_read);
3148
3149         while (dret == dpcd_bytes_to_read &&
3150                 process_count < max_process_count) {
3151                 uint8_t retry;
3152                 dret = 0;
3153
3154                 process_count++;
3155
3156                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3157                 /* handle HPD short pulse irq */
3158                 if (aconnector->mst_mgr.mst_state)
3159                         drm_dp_mst_hpd_irq(
3160                                 &aconnector->mst_mgr,
3161                                 esi,
3162                                 &new_irq_handled);
3163
3164                 if (new_irq_handled) {
3165                         /* ACK at DPCD to notify downstream */
3166                         const int ack_dpcd_bytes_to_write =
3167                                 dpcd_bytes_to_read - 1;
3168
3169                         for (retry = 0; retry < 3; retry++) {
3170                                 uint8_t wret;
3171
3172                                 wret = drm_dp_dpcd_write(
3173                                         &aconnector->dm_dp_aux.aux,
3174                                         dpcd_addr + 1,
3175                                         &esi[1],
3176                                         ack_dpcd_bytes_to_write);
3177                                 if (wret == ack_dpcd_bytes_to_write)
3178                                         break;
3179                         }
3180
3181                         /* check if there is a new irq to be handled */
3182                         dret = drm_dp_dpcd_read(
3183                                 &aconnector->dm_dp_aux.aux,
3184                                 dpcd_addr,
3185                                 esi,
3186                                 dpcd_bytes_to_read);
3187
3188                         new_irq_handled = false;
3189                 } else {
3190                         break;
3191                 }
3192         }
3193
3194         if (process_count == max_process_count)
3195                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3196 }
3197
3198 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3199                                                         union hpd_irq_data hpd_irq_data)
3200 {
3201         struct hpd_rx_irq_offload_work *offload_work =
3202                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3203
3204         if (!offload_work) {
3205                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3206                 return;
3207         }
3208
3209         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3210         offload_work->data = hpd_irq_data;
3211         offload_work->offload_wq = offload_wq;
3212
3213         queue_work(offload_wq->wq, &offload_work->work);
3214         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3215 }
3216
3217 static void handle_hpd_rx_irq(void *param)
3218 {
3219         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3220         struct drm_connector *connector = &aconnector->base;
3221         struct drm_device *dev = connector->dev;
3222         struct dc_link *dc_link = aconnector->dc_link;
3223         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3224         bool result = false;
3225         enum dc_connection_type new_connection_type = dc_connection_none;
3226         struct amdgpu_device *adev = drm_to_adev(dev);
3227         union hpd_irq_data hpd_irq_data;
3228         bool link_loss = false;
3229         bool has_left_work = false;
3230         int idx = aconnector->base.index;
3231         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3232
3233         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3234
3235         if (adev->dm.disable_hpd_irq)
3236                 return;
3237
3238         /*
3239          * TODO: Temporarily add a mutex to protect the hpd interrupt from
3240          * gpio conflicts; once an i2c helper is implemented, this mutex
3241          * should be retired.
3242          */
3243         mutex_lock(&aconnector->hpd_lock);
3244
3245         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3246                                                 &link_loss, true, &has_left_work);
3247
3248         if (!has_left_work)
3249                 goto out;
3250
3251         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3252                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3253                 goto out;
3254         }
3255
3256         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3257                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3258                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3259                         dm_handle_mst_sideband_msg(aconnector);
3260                         goto out;
3261                 }
3262
3263                 if (link_loss) {
3264                         bool skip = false;
3265
3266                         spin_lock(&offload_wq->offload_lock);
3267                         skip = offload_wq->is_handling_link_loss;
3268
3269                         if (!skip)
3270                                 offload_wq->is_handling_link_loss = true;
3271
3272                         spin_unlock(&offload_wq->offload_lock);
3273
3274                         if (!skip)
3275                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3276
3277                         goto out;
3278                 }
3279         }
3280
3281 out:
3282         if (result && !is_mst_root_connector) {
3283                 /* Downstream Port status changed. */
3284                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3285                         DRM_ERROR("KMS: Failed to detect connector\n");
3286
3287                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3288                         emulated_link_detect(dc_link);
3289
3290                         if (aconnector->fake_enable)
3291                                 aconnector->fake_enable = false;
3292
3293                         amdgpu_dm_update_connector_after_detect(aconnector);
3294
3295
3296                         drm_modeset_lock_all(dev);
3297                         dm_restore_drm_connector_state(dev, connector);
3298                         drm_modeset_unlock_all(dev);
3299
3300                         drm_kms_helper_connector_hotplug_event(connector);
3301                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3302
3303                         if (aconnector->fake_enable)
3304                                 aconnector->fake_enable = false;
3305
3306                         amdgpu_dm_update_connector_after_detect(aconnector);
3307
3308
3309                         drm_modeset_lock_all(dev);
3310                         dm_restore_drm_connector_state(dev, connector);
3311                         drm_modeset_unlock_all(dev);
3312
3313                         drm_kms_helper_connector_hotplug_event(connector);
3314                 }
3315         }
3316 #ifdef CONFIG_DRM_AMD_DC_HDCP
3317         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3318                 if (adev->dm.hdcp_workqueue)
3319                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3320         }
3321 #endif
3322
3323         if (dc_link->type != dc_connection_mst_branch)
3324                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3325
3326         mutex_unlock(&aconnector->hpd_lock);
3327 }
3328
3329 static void register_hpd_handlers(struct amdgpu_device *adev)
3330 {
3331         struct drm_device *dev = adev_to_drm(adev);
3332         struct drm_connector *connector;
3333         struct amdgpu_dm_connector *aconnector;
3334         const struct dc_link *dc_link;
3335         struct dc_interrupt_params int_params = {0};
3336
3337         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3338         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3339
3340         list_for_each_entry(connector,
3341                         &dev->mode_config.connector_list, head) {
3342
3343                 aconnector = to_amdgpu_dm_connector(connector);
3344                 dc_link = aconnector->dc_link;
3345
3346                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3347                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3348                         int_params.irq_source = dc_link->irq_source_hpd;
3349
3350                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3351                                         handle_hpd_irq,
3352                                         (void *) aconnector);
3353                 }
3354
3355                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3356
3357                         /* Also register for DP short pulse (hpd_rx). */
3358                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3359                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3360
3361                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3362                                         handle_hpd_rx_irq,
3363                                         (void *) aconnector);
3364
3365                         if (adev->dm.hpd_rx_offload_wq)
3366                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3367                                         aconnector;
3368                 }
3369         }
3370 }
3371
3372 #if defined(CONFIG_DRM_AMD_DC_SI)
3373 /* Register IRQ sources and initialize IRQ callbacks */
3374 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3375 {
3376         struct dc *dc = adev->dm.dc;
3377         struct common_irq_params *c_irq_params;
3378         struct dc_interrupt_params int_params = {0};
3379         int r;
3380         int i;
3381         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3382
3383         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3384         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3385
3386         /*
3387          * Actions of amdgpu_irq_add_id():
3388          * 1. Register a set() function with base driver.
3389          *    Base driver will call set() function to enable/disable an
3390          *    interrupt in DC hardware.
3391          * 2. Register amdgpu_dm_irq_handler().
3392          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3393          *    coming from DC hardware.
3394          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3395          *    for acknowledging and handling. */
3396
3397         /* Use VBLANK interrupt */
3398         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3399                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3400                 if (r) {
3401                         DRM_ERROR("Failed to add crtc irq id!\n");
3402                         return r;
3403                 }
3404
3405                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3406                 int_params.irq_source =
3407                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3408
3409                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3410
3411                 c_irq_params->adev = adev;
3412                 c_irq_params->irq_src = int_params.irq_source;
3413
3414                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3415                                 dm_crtc_high_irq, c_irq_params);
3416         }
3417
3418         /* Use GRPH_PFLIP interrupt */
3419         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3420                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3421                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3422                 if (r) {
3423                         DRM_ERROR("Failed to add page flip irq id!\n");
3424                         return r;
3425                 }
3426
3427                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3428                 int_params.irq_source =
3429                         dc_interrupt_to_irq_source(dc, i, 0);
3430
3431                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3432
3433                 c_irq_params->adev = adev;
3434                 c_irq_params->irq_src = int_params.irq_source;
3435
3436                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3437                                 dm_pflip_high_irq, c_irq_params);
3438
3439         }
3440
3441         /* HPD */
3442         r = amdgpu_irq_add_id(adev, client_id,
3443                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3444         if (r) {
3445                 DRM_ERROR("Failed to add hpd irq id!\n");
3446                 return r;
3447         }
3448
3449         register_hpd_handlers(adev);
3450
3451         return 0;
3452 }
3453 #endif
3454
3455 /* Register IRQ sources and initialize IRQ callbacks */
3456 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3457 {
3458         struct dc *dc = adev->dm.dc;
3459         struct common_irq_params *c_irq_params;
3460         struct dc_interrupt_params int_params = {0};
3461         int r;
3462         int i;
3463         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3464
3465         if (adev->family >= AMDGPU_FAMILY_AI)
3466                 client_id = SOC15_IH_CLIENTID_DCE;
3467
3468         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3469         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3470
3471         /*
3472          * Actions of amdgpu_irq_add_id():
3473          * 1. Register a set() function with base driver.
3474          *    Base driver will call set() function to enable/disable an
3475          *    interrupt in DC hardware.
3476          * 2. Register amdgpu_dm_irq_handler().
3477          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3478          *    coming from DC hardware.
3479          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3480          *    for acknowledging and handling. */
3481
3482         /* Use VBLANK interrupt */
3483         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3484                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3485                 if (r) {
3486                         DRM_ERROR("Failed to add crtc irq id!\n");
3487                         return r;
3488                 }
3489
3490                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3491                 int_params.irq_source =
3492                         dc_interrupt_to_irq_source(dc, i, 0);
3493
3494                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3495
3496                 c_irq_params->adev = adev;
3497                 c_irq_params->irq_src = int_params.irq_source;
3498
3499                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3500                                 dm_crtc_high_irq, c_irq_params);
3501         }
3502
3503         /* Use VUPDATE interrupt */
3504         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3505                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3506                 if (r) {
3507                         DRM_ERROR("Failed to add vupdate irq id!\n");
3508                         return r;
3509                 }
3510
3511                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3512                 int_params.irq_source =
3513                         dc_interrupt_to_irq_source(dc, i, 0);
3514
3515                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3516
3517                 c_irq_params->adev = adev;
3518                 c_irq_params->irq_src = int_params.irq_source;
3519
3520                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3521                                 dm_vupdate_high_irq, c_irq_params);
3522         }
3523
3524         /* Use GRPH_PFLIP interrupt */
3525         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3526                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3527                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3528                 if (r) {
3529                         DRM_ERROR("Failed to add page flip irq id!\n");
3530                         return r;
3531                 }
3532
3533                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3534                 int_params.irq_source =
3535                         dc_interrupt_to_irq_source(dc, i, 0);
3536
3537                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3538
3539                 c_irq_params->adev = adev;
3540                 c_irq_params->irq_src = int_params.irq_source;
3541
3542                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3543                                 dm_pflip_high_irq, c_irq_params);
3544
3545         }
3546
3547         /* HPD */
3548         r = amdgpu_irq_add_id(adev, client_id,
3549                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3550         if (r) {
3551                 DRM_ERROR("Failed to add hpd irq id!\n");
3552                 return r;
3553         }
3554
3555         register_hpd_handlers(adev);
3556
3557         return 0;
3558 }
3559
3560 #if defined(CONFIG_DRM_AMD_DC_DCN)
3561 /* Register IRQ sources and initialize IRQ callbacks */
3562 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3563 {
3564         struct dc *dc = adev->dm.dc;
3565         struct common_irq_params *c_irq_params;
3566         struct dc_interrupt_params int_params = {0};
3567         int r;
3568         int i;
3569 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3570         static const unsigned int vrtl_int_srcid[] = {
3571                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3572                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3573                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3574                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3575                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3576                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3577         };
3578 #endif
3579
3580         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3581         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3582
3583         /*
3584          * Actions of amdgpu_irq_add_id():
3585          * 1. Register a set() function with base driver.
3586          *    Base driver will call set() function to enable/disable an
3587          *    interrupt in DC hardware.
3588          * 2. Register amdgpu_dm_irq_handler().
3589          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3590          *    coming from DC hardware.
3591          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3592          *    for acknowledging and handling.
3593          */
3594
3595         /* Use VSTARTUP interrupt */
3596         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3597                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3598                         i++) {
3599                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3600
3601                 if (r) {
3602                         DRM_ERROR("Failed to add crtc irq id!\n");
3603                         return r;
3604                 }
3605
3606                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3607                 int_params.irq_source =
3608                         dc_interrupt_to_irq_source(dc, i, 0);
3609
3610                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3611
3612                 c_irq_params->adev = adev;
3613                 c_irq_params->irq_src = int_params.irq_source;
3614
3615                 amdgpu_dm_irq_register_interrupt(
3616                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3617         }
3618
3619         /* Use otg vertical line interrupt */
3620 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3621         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3622                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3623                                 vrtl_int_srcid[i], &adev->vline0_irq);
3624
3625                 if (r) {
3626                         DRM_ERROR("Failed to add vline0 irq id!\n");
3627                         return r;
3628                 }
3629
3630                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3631                 int_params.irq_source =
3632                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3633
3634                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3635                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3636                         break;
3637                 }
3638
3639                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3640                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3641
3642                 c_irq_params->adev = adev;
3643                 c_irq_params->irq_src = int_params.irq_source;
3644
3645                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3646                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3647         }
3648 #endif
3649
3650         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3651          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3652          * to trigger at end of each vblank, regardless of state of the lock,
3653          * matching DCE behaviour.
3654          */
3655         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3656              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3657              i++) {
3658                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3659
3660                 if (r) {
3661                         DRM_ERROR("Failed to add vupdate irq id!\n");
3662                         return r;
3663                 }
3664
3665                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3666                 int_params.irq_source =
3667                         dc_interrupt_to_irq_source(dc, i, 0);
3668
3669                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3670
3671                 c_irq_params->adev = adev;
3672                 c_irq_params->irq_src = int_params.irq_source;
3673
3674                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3675                                 dm_vupdate_high_irq, c_irq_params);
3676         }
3677
3678         /* Use GRPH_PFLIP interrupt */
3679         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3680                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3681                         i++) {
3682                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3683                 if (r) {
3684                         DRM_ERROR("Failed to add page flip irq id!\n");
3685                         return r;
3686                 }
3687
3688                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3689                 int_params.irq_source =
3690                         dc_interrupt_to_irq_source(dc, i, 0);
3691
3692                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3693
3694                 c_irq_params->adev = adev;
3695                 c_irq_params->irq_src = int_params.irq_source;
3696
3697                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3698                                 dm_pflip_high_irq, c_irq_params);
3699
3700         }
3701
3702         /* HPD */
3703         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3704                         &adev->hpd_irq);
3705         if (r) {
3706                 DRM_ERROR("Failed to add hpd irq id!\n");
3707                 return r;
3708         }
3709
3710         register_hpd_handlers(adev);
3711
3712         return 0;
3713 }
3714 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3715 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3716 {
3717         struct dc *dc = adev->dm.dc;
3718         struct common_irq_params *c_irq_params;
3719         struct dc_interrupt_params int_params = {0};
3720         int r, i;
3721
3722         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3723         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3724
3725         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3726                         &adev->dmub_outbox_irq);
3727         if (r) {
3728                 DRM_ERROR("Failed to add outbox irq id!\n");
3729                 return r;
3730         }
3731
3732         if (dc->ctx->dmub_srv) {
3733                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3734                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3735                 int_params.irq_source =
3736                         dc_interrupt_to_irq_source(dc, i, 0);
3737
3738                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3739
3740                 c_irq_params->adev = adev;
3741                 c_irq_params->irq_src = int_params.irq_source;
3742
3743                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3744                                 dm_dmub_outbox1_low_irq, c_irq_params);
3745         }
3746
3747         return 0;
3748 }
3749 #endif
3750
3751 /*
3752  * Acquires the lock on the dm atomic private object and stores the new
3753  * dm atomic state in *dm_state. Returns 0 on success.
3754  *
3755  * This should only be called during atomic check.
3756  */
3757 int dm_atomic_get_state(struct drm_atomic_state *state,
3758                         struct dm_atomic_state **dm_state)
3759 {
3760         struct drm_device *dev = state->dev;
3761         struct amdgpu_device *adev = drm_to_adev(dev);
3762         struct amdgpu_display_manager *dm = &adev->dm;
3763         struct drm_private_state *priv_state;
3764
3765         if (*dm_state)
3766                 return 0;
3767
3768         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3769         if (IS_ERR(priv_state))
3770                 return PTR_ERR(priv_state);
3771
3772         *dm_state = to_dm_atomic_state(priv_state);
3773
3774         return 0;
3775 }
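/*
 * Typical call pattern during atomic check (illustrative sketch, not taken
 * verbatim from this file):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be safely inspected or modified
 */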
3776
3777 static struct dm_atomic_state *
3778 dm_atomic_get_new_state(struct drm_atomic_state *state)
3779 {
3780         struct drm_device *dev = state->dev;
3781         struct amdgpu_device *adev = drm_to_adev(dev);
3782         struct amdgpu_display_manager *dm = &adev->dm;
3783         struct drm_private_obj *obj;
3784         struct drm_private_state *new_obj_state;
3785         int i;
3786
3787         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3788                 if (obj->funcs == dm->atomic_obj.funcs)
3789                         return to_dm_atomic_state(new_obj_state);
3790         }
3791
3792         return NULL;
3793 }
3794
3795 static struct drm_private_state *
3796 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3797 {
3798         struct dm_atomic_state *old_state, *new_state;
3799
3800         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3801         if (!new_state)
3802                 return NULL;
3803
3804         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3805
3806         old_state = to_dm_atomic_state(obj->state);
3807
3808         if (old_state && old_state->context)
3809                 new_state->context = dc_copy_state(old_state->context);
3810
3811         if (!new_state->context) {
3812                 kfree(new_state);
3813                 return NULL;
3814         }
3815
3816         return &new_state->base;
3817 }
3818
3819 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3820                                     struct drm_private_state *state)
3821 {
3822         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3823
3824         if (dm_state && dm_state->context)
3825                 dc_release_state(dm_state->context);
3826
3827         kfree(dm_state);
3828 }
3829
3830 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3831         .atomic_duplicate_state = dm_atomic_duplicate_state,
3832         .atomic_destroy_state = dm_atomic_destroy_state,
3833 };
3834
3835 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3836 {
3837         struct dm_atomic_state *state;
3838         int r;
3839
3840         adev->mode_info.mode_config_initialized = true;
3841
3842         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3843         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3844
3845         adev_to_drm(adev)->mode_config.max_width = 16384;
3846         adev_to_drm(adev)->mode_config.max_height = 16384;
3847
3848         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3849         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3850         /* indicates support for immediate flip */
3851         adev_to_drm(adev)->mode_config.async_page_flip = true;
3852
3853         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3854
3855         state = kzalloc(sizeof(*state), GFP_KERNEL);
3856         if (!state)
3857                 return -ENOMEM;
3858
3859         state->context = dc_create_state(adev->dm.dc);
3860         if (!state->context) {
3861                 kfree(state);
3862                 return -ENOMEM;
3863         }
3864
3865         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3866
3867         drm_atomic_private_obj_init(adev_to_drm(adev),
3868                                     &adev->dm.atomic_obj,
3869                                     &state->base,
3870                                     &dm_atomic_state_funcs);
3871
3872         r = amdgpu_display_modeset_create_props(adev);
3873         if (r) {
3874                 dc_release_state(state->context);
3875                 kfree(state);
3876                 return r;
3877         }
3878
3879         r = amdgpu_dm_audio_init(adev);
3880         if (r) {
3881                 dc_release_state(state->context);
3882                 kfree(state);
3883                 return r;
3884         }
3885
3886         return 0;
3887 }
3888
3889 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3890 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3891 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3892
3893 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3894         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3895
3896 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3897                                             int bl_idx)
3898 {
3899 #if defined(CONFIG_ACPI)
3900         struct amdgpu_dm_backlight_caps caps;
3901
3902         memset(&caps, 0, sizeof(caps));
3903
3904         if (dm->backlight_caps[bl_idx].caps_valid)
3905                 return;
3906
3907         amdgpu_acpi_get_backlight_caps(&caps);
3908         if (caps.caps_valid) {
3909                 dm->backlight_caps[bl_idx].caps_valid = true;
3910                 if (caps.aux_support)
3911                         return;
3912                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3913                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3914         } else {
3915                 dm->backlight_caps[bl_idx].min_input_signal =
3916                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3917                 dm->backlight_caps[bl_idx].max_input_signal =
3918                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3919         }
3920 #else
3921         if (dm->backlight_caps[bl_idx].aux_support)
3922                 return;
3923
3924         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3925         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3926 #endif
3927 }
3928
3929 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3930                                 unsigned *min, unsigned *max)
3931 {
3932         if (!caps)
3933                 return 0;
3934
3935         if (caps->aux_support) {
3936                 // Firmware limits are in nits, DC API wants millinits.
3937                 *max = 1000 * caps->aux_max_input_signal;
3938                 *min = 1000 * caps->aux_min_input_signal;
3939         } else {
3940                 // Firmware limits are 8-bit, PWM control is 16-bit.
3941                 *max = 0x101 * caps->max_input_signal;
3942                 *min = 0x101 * caps->min_input_signal;
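                // The 0x101 factor maps the 8-bit firmware range onto the
                // 16-bit PWM range exactly: 0x101 * 0xFF = 0xFFFF.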
3943         }
3944         return 1;
3945 }
3946
3947 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3948                                         uint32_t brightness)
3949 {
3950         unsigned min, max;
3951
3952         if (!get_brightness_range(caps, &min, &max))
3953                 return brightness;
3954
3955         // Rescale 0..255 to min..max
3956         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3957                                        AMDGPU_MAX_BL_LEVEL);
3958 }
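/*
 * Worked example for convert_brightness_from_user (illustrative): with the PWM
 * defaults min_input_signal = 12 and max_input_signal = 255, min = 0x101 * 12 =
 * 3084 and max = 65535, so user brightness 0 maps to 3084 and 255 maps to 65535.
 */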
3959
3960 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3961                                       uint32_t brightness)
3962 {
3963         unsigned min, max;
3964
3965         if (!get_brightness_range(caps, &min, &max))
3966                 return brightness;
3967
3968         if (brightness < min)
3969                 return 0;
3970         // Rescale min..max to 0..255
3971         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3972                                  max - min);
3973 }
3974
3975 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3976                                          int bl_idx,
3977                                          u32 user_brightness)
3978 {
3979         struct amdgpu_dm_backlight_caps caps;
3980         struct dc_link *link;
3981         u32 brightness;
3982         bool rc;
3983
3984         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3985         caps = dm->backlight_caps[bl_idx];
3986
3987         dm->brightness[bl_idx] = user_brightness;
3988         /* update scratch register */
3989         if (bl_idx == 0)
3990                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3991         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3992         link = (struct dc_link *)dm->backlight_link[bl_idx];
3993
3994         /* Change brightness based on AUX property */
3995         if (caps.aux_support) {
3996                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3997                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3998                 if (!rc)
3999                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4000         } else {
4001                 rc = dc_link_set_backlight_level(link, brightness, 0);
4002                 if (!rc)
4003                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4004         }
4005
4006         return rc ? 0 : 1;
4007 }
4008
4009 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4010 {
4011         struct amdgpu_display_manager *dm = bl_get_data(bd);
4012         int i;
4013
4014         for (i = 0; i < dm->num_of_edps; i++) {
4015                 if (bd == dm->backlight_dev[i])
4016                         break;
4017         }
4018         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4019                 i = 0;
4020         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4021
4022         return 0;
4023 }
4024
4025 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4026                                          int bl_idx)
4027 {
4028         struct amdgpu_dm_backlight_caps caps;
4029         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4030
4031         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4032         caps = dm->backlight_caps[bl_idx];
4033
4034         if (caps.aux_support) {
4035                 u32 avg, peak;
4036                 bool rc;
4037
4038                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4039                 if (!rc)
4040                         return dm->brightness[bl_idx];
4041                 return convert_brightness_to_user(&caps, avg);
4042         } else {
4043                 int ret = dc_link_get_backlight_level(link);
4044
4045                 if (ret == DC_ERROR_UNEXPECTED)
4046                         return dm->brightness[bl_idx];
4047                 return convert_brightness_to_user(&caps, ret);
4048         }
4049 }
4050
4051 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4052 {
4053         struct amdgpu_display_manager *dm = bl_get_data(bd);
4054         int i;
4055
4056         for (i = 0; i < dm->num_of_edps; i++) {
4057                 if (bd == dm->backlight_dev[i])
4058                         break;
4059         }
4060         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4061                 i = 0;
4062         return amdgpu_dm_backlight_get_level(dm, i);
4063 }
4064
4065 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4066         .options = BL_CORE_SUSPENDRESUME,
4067         .get_brightness = amdgpu_dm_backlight_get_brightness,
4068         .update_status  = amdgpu_dm_backlight_update_status,
4069 };
4070
4071 static void
4072 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4073 {
4074         char bl_name[16];
4075         struct backlight_properties props = { 0 };
4076
4077         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4078         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4079
4080         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4081         props.brightness = AMDGPU_MAX_BL_LEVEL;
4082         props.type = BACKLIGHT_RAW;
4083
4084         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4085                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
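        /* e.g. "amdgpu_bl0" for the first eDP panel on the first DRM device
         * (illustrative; the index depends on the DRM minor and eDP count).
         */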
4086
4087         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4088                                                                        adev_to_drm(dm->adev)->dev,
4089                                                                        dm,
4090                                                                        &amdgpu_dm_backlight_ops,
4091                                                                        &props);
4092
4093         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4094                 DRM_ERROR("DM: Backlight registration failed!\n");
4095         else
4096                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4097 }
4098 #endif
4099
4100 static int initialize_plane(struct amdgpu_display_manager *dm,
4101                             struct amdgpu_mode_info *mode_info, int plane_id,
4102                             enum drm_plane_type plane_type,
4103                             const struct dc_plane_cap *plane_cap)
4104 {
4105         struct drm_plane *plane;
4106         unsigned long possible_crtcs;
4107         int ret = 0;
4108
4109         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4110         if (!plane) {
4111                 DRM_ERROR("KMS: Failed to allocate plane\n");
4112                 return -ENOMEM;
4113         }
4114         plane->type = plane_type;
4115
4116         /*
4117          * HACK: IGT tests expect that the primary plane for a CRTC
4118          * can only have one possible CRTC. Only expose support for
4119          * any CRTC on planes that will not be used as a primary plane
4120          * for a CRTC, such as overlay or underlay planes.
4121          */
4122         possible_crtcs = 1 << plane_id;
4123         if (plane_id >= dm->dc->caps.max_streams)
4124                 possible_crtcs = 0xff;
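
        /*
         * Worked example (illustrative): with dc->caps.max_streams == 4, a
         * primary plane with plane_id == 1 gets possible_crtcs == 0x2 (bound
         * to CRTC 1 only), while an overlay plane with plane_id == 4 gets
         * 0xff (may be assigned to any CRTC).
         */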
4125
4126         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4127
4128         if (ret) {
4129                 DRM_ERROR("KMS: Failed to initialize plane\n");
4130                 kfree(plane);
4131                 return ret;
4132         }
4133
4134         if (mode_info)
4135                 mode_info->planes[plane_id] = plane;
4136
4137         return ret;
4138 }
4139
4140
4141 static void register_backlight_device(struct amdgpu_display_manager *dm,
4142                                       struct dc_link *link)
4143 {
4144 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4145         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4146
4147         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4148             link->type != dc_connection_none) {
4149                 /*
4150                  * Even if registration failed, we should continue with
4151                  * DM initialization, because not having backlight control
4152                  * is better than a black screen.
4153                  */
4154                 if (!dm->backlight_dev[dm->num_of_edps])
4155                         amdgpu_dm_register_backlight_device(dm);
4156
4157                 if (dm->backlight_dev[dm->num_of_edps]) {
4158                         dm->backlight_link[dm->num_of_edps] = link;
4159                         dm->num_of_edps++;
4160                 }
4161         }
4162 #endif
4163 }
4164
4165
4166 /*
4167  * In this architecture, the association
4168  * connector -> encoder -> crtc
4169  * is not really required. The crtc and connector will hold the
4170  * display_index as an abstraction to use with the DAL component.
4171  *
4172  * Returns 0 on success
4173  */
4174 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4175 {
4176         struct amdgpu_display_manager *dm = &adev->dm;
4177         int32_t i;
4178         struct amdgpu_dm_connector *aconnector = NULL;
4179         struct amdgpu_encoder *aencoder = NULL;
4180         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4181         uint32_t link_cnt;
4182         int32_t primary_planes;
4183         enum dc_connection_type new_connection_type = dc_connection_none;
4184         const struct dc_plane_cap *plane;
4185         bool psr_feature_enabled = false;
4186
4187         dm->display_indexes_num = dm->dc->caps.max_streams;
4188         /* Update the actual number of CRTCs in use */
4189         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4190
4191         link_cnt = dm->dc->caps.max_links;
4192         if (amdgpu_dm_mode_config_init(dm->adev)) {
4193                 DRM_ERROR("DM: Failed to initialize mode config\n");
4194                 return -EINVAL;
4195         }
4196
4197         /* There is one primary plane per CRTC */
4198         primary_planes = dm->dc->caps.max_streams;
4199         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4200
4201         /*
4202          * Initialize primary planes, implicit planes for legacy IOCTLs.
4203          * Order is reversed to match iteration order in atomic check.
4204          */
4205         for (i = (primary_planes - 1); i >= 0; i--) {
4206                 plane = &dm->dc->caps.planes[i];
4207
4208                 if (initialize_plane(dm, mode_info, i,
4209                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4210                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4211                         goto fail;
4212                 }
4213         }
4214
4215         /*
4216          * Initialize overlay planes, index starting after primary planes.
4217          * These planes have a higher DRM index than the primary planes since
4218          * they should be considered as having a higher z-order.
4219          * Order is reversed to match iteration order in atomic check.
4220          *
4221          * Only support DCN for now, and only expose one so we don't encourage
4222          * userspace to use up all the pipes.
4223          */
4224         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4225                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4226
4227                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4228                         continue;
4229
4230                 if (!plane->blends_with_above || !plane->blends_with_below)
4231                         continue;
4232
4233                 if (!plane->pixel_format_support.argb8888)
4234                         continue;
4235
4236                 if (initialize_plane(dm, NULL, primary_planes + i,
4237                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4238                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4239                         goto fail;
4240                 }
4241
4242                 /* Only create one overlay plane. */
4243                 break;
4244         }
4245
4246         for (i = 0; i < dm->dc->caps.max_streams; i++)
4247                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4248                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4249                         goto fail;
4250                 }
4251
4252 #if defined(CONFIG_DRM_AMD_DC_DCN)
4253         /* Use Outbox interrupt */
4254         switch (adev->ip_versions[DCE_HWIP][0]) {
4255         case IP_VERSION(3, 0, 0):
4256         case IP_VERSION(3, 1, 2):
4257         case IP_VERSION(3, 1, 3):
4258         case IP_VERSION(3, 1, 5):
4259         case IP_VERSION(3, 1, 6):
4260         case IP_VERSION(2, 1, 0):
4261                 if (register_outbox_irq_handlers(dm->adev)) {
4262                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4263                         goto fail;
4264                 }
4265                 break;
4266         default:
4267                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4268                               adev->ip_versions[DCE_HWIP][0]);
4269         }
4270
4271         /* Determine whether to enable PSR support by default. */
4272         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4273                 switch (adev->ip_versions[DCE_HWIP][0]) {
4274                 case IP_VERSION(3, 1, 2):
4275                 case IP_VERSION(3, 1, 3):
4276                 case IP_VERSION(3, 1, 5):
4277                 case IP_VERSION(3, 1, 6):
4278                         psr_feature_enabled = true;
4279                         break;
4280                 default:
4281                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4282                         break;
4283                 }
4284         }
4285 #endif
4286
4287         /* Disable vblank IRQs aggressively for power-saving. */
4288         adev_to_drm(adev)->vblank_disable_immediate = true;
4289
4290         /* loops over all connectors on the board */
4291         for (i = 0; i < link_cnt; i++) {
4292                 struct dc_link *link = NULL;
4293
4294                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4295                         DRM_ERROR(
4296                                 "KMS: Cannot support more than %d display indexes\n",
4297                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4298                         continue;
4299                 }
4300
4301                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4302                 if (!aconnector)
4303                         goto fail;
4304
4305                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4306                 if (!aencoder)
4307                         goto fail;
4308
4309                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4310                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4311                         goto fail;
4312                 }
4313
4314                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4315                         DRM_ERROR("KMS: Failed to initialize connector\n");
4316                         goto fail;
4317                 }
4318
4319                 link = dc_get_link_at_index(dm->dc, i);
4320
4321                 if (!dc_link_detect_sink(link, &new_connection_type))
4322                         DRM_ERROR("KMS: Failed to detect connector\n");
4323
4324                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4325                         emulated_link_detect(link);
4326                         amdgpu_dm_update_connector_after_detect(aconnector);
4327
4328                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4329                         amdgpu_dm_update_connector_after_detect(aconnector);
4330                         register_backlight_device(dm, link);
4331                         if (dm->num_of_edps)
4332                                 update_connector_ext_caps(aconnector);
4333                         if (psr_feature_enabled)
4334                                 amdgpu_dm_set_psr_caps(link);
4335
4336                         /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4337                          * PSR is also supported.
4338                          */
4339                         if (link->psr_settings.psr_feature_enabled)
4340                                 adev_to_drm(adev)->vblank_disable_immediate = false;
4341                 }
4342
4343
4344         }
4345
4346         /* Software is initialized. Now we can register interrupt handlers. */
4347         switch (adev->asic_type) {
4348 #if defined(CONFIG_DRM_AMD_DC_SI)
4349         case CHIP_TAHITI:
4350         case CHIP_PITCAIRN:
4351         case CHIP_VERDE:
4352         case CHIP_OLAND:
4353                 if (dce60_register_irq_handlers(dm->adev)) {
4354                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4355                         goto fail;
4356                 }
4357                 break;
4358 #endif
4359         case CHIP_BONAIRE:
4360         case CHIP_HAWAII:
4361         case CHIP_KAVERI:
4362         case CHIP_KABINI:
4363         case CHIP_MULLINS:
4364         case CHIP_TONGA:
4365         case CHIP_FIJI:
4366         case CHIP_CARRIZO:
4367         case CHIP_STONEY:
4368         case CHIP_POLARIS11:
4369         case CHIP_POLARIS10:
4370         case CHIP_POLARIS12:
4371         case CHIP_VEGAM:
4372         case CHIP_VEGA10:
4373         case CHIP_VEGA12:
4374         case CHIP_VEGA20:
4375                 if (dce110_register_irq_handlers(dm->adev)) {
4376                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4377                         goto fail;
4378                 }
4379                 break;
4380         default:
4381 #if defined(CONFIG_DRM_AMD_DC_DCN)
4382                 switch (adev->ip_versions[DCE_HWIP][0]) {
4383                 case IP_VERSION(1, 0, 0):
4384                 case IP_VERSION(1, 0, 1):
4385                 case IP_VERSION(2, 0, 2):
4386                 case IP_VERSION(2, 0, 3):
4387                 case IP_VERSION(2, 0, 0):
4388                 case IP_VERSION(2, 1, 0):
4389                 case IP_VERSION(3, 0, 0):
4390                 case IP_VERSION(3, 0, 2):
4391                 case IP_VERSION(3, 0, 3):
4392                 case IP_VERSION(3, 0, 1):
4393                 case IP_VERSION(3, 1, 2):
4394                 case IP_VERSION(3, 1, 3):
4395                 case IP_VERSION(3, 1, 5):
4396                 case IP_VERSION(3, 1, 6):
4397                         if (dcn10_register_irq_handlers(dm->adev)) {
4398                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4399                                 goto fail;
4400                         }
4401                         break;
4402                 default:
4403                         DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4404                                         adev->ip_versions[DCE_HWIP][0]);
4405                         goto fail;
4406                 }
4407 #endif
4408                 break;
4409         }
4410
4411         return 0;
4412 fail:
4413         kfree(aencoder);
4414         kfree(aconnector);
4415
4416         return -EINVAL;
4417 }
4418
4419 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4420 {
4421         drm_atomic_private_obj_fini(&dm->atomic_obj);
4422         return;
4423 }
4424
4425 /******************************************************************************
4426  * amdgpu_display_funcs functions
4427  *****************************************************************************/
4428
4429 /*
4430  * dm_bandwidth_update - program display watermarks
4431  *
4432  * @adev: amdgpu_device pointer
4433  *
4434  * Calculate and program the display watermarks and line buffer allocation.
4435  */
4436 static void dm_bandwidth_update(struct amdgpu_device *adev)
4437 {
4438         /* TODO: implement later */
4439 }
4440
4441 static const struct amdgpu_display_funcs dm_display_funcs = {
4442         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4443         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4444         .backlight_set_level = NULL, /* never called for DC */
4445         .backlight_get_level = NULL, /* never called for DC */
4446         .hpd_sense = NULL,/* called unconditionally */
4447         .hpd_set_polarity = NULL, /* called unconditionally */
4448         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4449         .page_flip_get_scanoutpos =
4450                 dm_crtc_get_scanoutpos,/* called unconditionally */
4451         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4452         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4453 };
4454
4455 #if defined(CONFIG_DEBUG_KERNEL_DC)
4456
4457 static ssize_t s3_debug_store(struct device *device,
4458                               struct device_attribute *attr,
4459                               const char *buf,
4460                               size_t count)
4461 {
4462         int ret;
4463         int s3_state;
4464         struct drm_device *drm_dev = dev_get_drvdata(device);
4465         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4466
4467         ret = kstrtoint(buf, 0, &s3_state);
4468
4469         if (ret == 0) {
4470                 if (s3_state) {
4471                         dm_resume(adev);
4472                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4473                 } else
4474                         dm_suspend(adev);
4475         }
4476
4477         return ret == 0 ? count : 0;
4478 }
4479
4480 DEVICE_ATTR_WO(s3_debug);
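
/*
 * Illustrative usage (the sysfs path is an assumption; the attribute is
 * created on the DRM device's parent device in dm_early_init() below):
 *
 *   # echo 0 > /sys/bus/pci/devices/<bdf>/s3_debug    (dm_suspend)
 *   # echo 1 > /sys/bus/pci/devices/<bdf>/s3_debug    (dm_resume + hotplug event)
 */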
4481
4482 #endif
4483
4484 static int dm_early_init(void *handle)
4485 {
4486         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4487
4488         switch (adev->asic_type) {
4489 #if defined(CONFIG_DRM_AMD_DC_SI)
4490         case CHIP_TAHITI:
4491         case CHIP_PITCAIRN:
4492         case CHIP_VERDE:
4493                 adev->mode_info.num_crtc = 6;
4494                 adev->mode_info.num_hpd = 6;
4495                 adev->mode_info.num_dig = 6;
4496                 break;
4497         case CHIP_OLAND:
4498                 adev->mode_info.num_crtc = 2;
4499                 adev->mode_info.num_hpd = 2;
4500                 adev->mode_info.num_dig = 2;
4501                 break;
4502 #endif
4503         case CHIP_BONAIRE:
4504         case CHIP_HAWAII:
4505                 adev->mode_info.num_crtc = 6;
4506                 adev->mode_info.num_hpd = 6;
4507                 adev->mode_info.num_dig = 6;
4508                 break;
4509         case CHIP_KAVERI:
4510                 adev->mode_info.num_crtc = 4;
4511                 adev->mode_info.num_hpd = 6;
4512                 adev->mode_info.num_dig = 7;
4513                 break;
4514         case CHIP_KABINI:
4515         case CHIP_MULLINS:
4516                 adev->mode_info.num_crtc = 2;
4517                 adev->mode_info.num_hpd = 6;
4518                 adev->mode_info.num_dig = 6;
4519                 break;
4520         case CHIP_FIJI:
4521         case CHIP_TONGA:
4522                 adev->mode_info.num_crtc = 6;
4523                 adev->mode_info.num_hpd = 6;
4524                 adev->mode_info.num_dig = 7;
4525                 break;
4526         case CHIP_CARRIZO:
4527                 adev->mode_info.num_crtc = 3;
4528                 adev->mode_info.num_hpd = 6;
4529                 adev->mode_info.num_dig = 9;
4530                 break;
4531         case CHIP_STONEY:
4532                 adev->mode_info.num_crtc = 2;
4533                 adev->mode_info.num_hpd = 6;
4534                 adev->mode_info.num_dig = 9;
4535                 break;
4536         case CHIP_POLARIS11:
4537         case CHIP_POLARIS12:
4538                 adev->mode_info.num_crtc = 5;
4539                 adev->mode_info.num_hpd = 5;
4540                 adev->mode_info.num_dig = 5;
4541                 break;
4542         case CHIP_POLARIS10:
4543         case CHIP_VEGAM:
4544                 adev->mode_info.num_crtc = 6;
4545                 adev->mode_info.num_hpd = 6;
4546                 adev->mode_info.num_dig = 6;
4547                 break;
4548         case CHIP_VEGA10:
4549         case CHIP_VEGA12:
4550         case CHIP_VEGA20:
4551                 adev->mode_info.num_crtc = 6;
4552                 adev->mode_info.num_hpd = 6;
4553                 adev->mode_info.num_dig = 6;
4554                 break;
4555         default:
4556 #if defined(CONFIG_DRM_AMD_DC_DCN)
4557                 switch (adev->ip_versions[DCE_HWIP][0]) {
4558                 case IP_VERSION(2, 0, 2):
4559                 case IP_VERSION(3, 0, 0):
4560                         adev->mode_info.num_crtc = 6;
4561                         adev->mode_info.num_hpd = 6;
4562                         adev->mode_info.num_dig = 6;
4563                         break;
4564                 case IP_VERSION(2, 0, 0):
4565                 case IP_VERSION(3, 0, 2):
4566                         adev->mode_info.num_crtc = 5;
4567                         adev->mode_info.num_hpd = 5;
4568                         adev->mode_info.num_dig = 5;
4569                         break;
4570                 case IP_VERSION(2, 0, 3):
4571                 case IP_VERSION(3, 0, 3):
4572                         adev->mode_info.num_crtc = 2;
4573                         adev->mode_info.num_hpd = 2;
4574                         adev->mode_info.num_dig = 2;
4575                         break;
4576                 case IP_VERSION(1, 0, 0):
4577                 case IP_VERSION(1, 0, 1):
4578                 case IP_VERSION(3, 0, 1):
4579                 case IP_VERSION(2, 1, 0):
4580                 case IP_VERSION(3, 1, 2):
4581                 case IP_VERSION(3, 1, 3):
4582                 case IP_VERSION(3, 1, 5):
4583                 case IP_VERSION(3, 1, 6):
4584                         adev->mode_info.num_crtc = 4;
4585                         adev->mode_info.num_hpd = 4;
4586                         adev->mode_info.num_dig = 4;
4587                         break;
4588                 default:
4589                         DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4590                                         adev->ip_versions[DCE_HWIP][0]);
4591                         return -EINVAL;
4592                 }
4593 #endif
4594                 break;
4595         }
4596
4597         amdgpu_dm_set_irq_funcs(adev);
4598
4599         if (adev->mode_info.funcs == NULL)
4600                 adev->mode_info.funcs = &dm_display_funcs;
4601
4602         /*
4603          * Note: Do NOT change adev->audio_endpt_rreg and
4604          * adev->audio_endpt_wreg because they are initialised in
4605          * amdgpu_device_init()
4606          */
4607 #if defined(CONFIG_DEBUG_KERNEL_DC)
4608         device_create_file(
4609                 adev_to_drm(adev)->dev,
4610                 &dev_attr_s3_debug);
4611 #endif
4612
4613         return 0;
4614 }
4615
4616 static bool modeset_required(struct drm_crtc_state *crtc_state,
4617                              struct dc_stream_state *new_stream,
4618                              struct dc_stream_state *old_stream)
4619 {
4620         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4621 }
4622
4623 static bool modereset_required(struct drm_crtc_state *crtc_state)
4624 {
4625         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4626 }
4627
4628 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4629 {
4630         drm_encoder_cleanup(encoder);
4631         kfree(encoder);
4632 }
4633
4634 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4635         .destroy = amdgpu_dm_encoder_destroy,
4636 };
4637
4638
4639 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4640                                          struct drm_framebuffer *fb,
4641                                          int *min_downscale, int *max_upscale)
4642 {
4643         struct amdgpu_device *adev = drm_to_adev(dev);
4644         struct dc *dc = adev->dm.dc;
4645         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4646         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4647
4648         switch (fb->format->format) {
4649         case DRM_FORMAT_P010:
4650         case DRM_FORMAT_NV12:
4651         case DRM_FORMAT_NV21:
4652                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4653                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4654                 break;
4655
4656         case DRM_FORMAT_XRGB16161616F:
4657         case DRM_FORMAT_ARGB16161616F:
4658         case DRM_FORMAT_XBGR16161616F:
4659         case DRM_FORMAT_ABGR16161616F:
4660                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4661                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4662                 break;
4663
4664         default:
4665                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4666                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4667                 break;
4668         }
4669
4670         /*
4671          * A factor of 1 in the plane_cap means scaling is not allowed,
4672          * i.e. use a scaling factor of 1.0 == 1000 units.
4673          */
4674         if (*max_upscale == 1)
4675                 *max_upscale = 1000;
4676
4677         if (*min_downscale == 1)
4678                 *min_downscale = 1000;
4679 }
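
/*
 * Note (illustrative): scaling factors throughout this file are expressed in
 * thousandths, so 1000 == 1.0x. A min_downscale of 250 therefore permits
 * shrinking to 1/4 size, and a max_upscale of 16000 permits 16x enlargement,
 * matching the fallback values used in fill_dc_scaling_info() below.
 */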
4680
4681
4682 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4683                                 const struct drm_plane_state *state,
4684                                 struct dc_scaling_info *scaling_info)
4685 {
4686         int scale_w, scale_h, min_downscale, max_upscale;
4687
4688         memset(scaling_info, 0, sizeof(*scaling_info));
4689
4690         /* Source coordinates are fixed-point 16.16; ignore the fractional part for now. */
4691         scaling_info->src_rect.x = state->src_x >> 16;
4692         scaling_info->src_rect.y = state->src_y >> 16;
4693
4694         /*
4695          * For reasons we don't (yet) fully understand, a non-zero
4696          * src_y coordinate into an NV12 buffer can cause a
4697          * system hang on DCN1x.
4698          * To avoid hangs (and maybe be overly cautious)
4699          * let's reject both non-zero src_x and src_y.
4700          *
4701          * We currently know of only one use-case to reproduce a
4702          * scenario with non-zero src_x and src_y for NV12, which
4703          * is to gesture the YouTube Android app into full screen
4704          * on ChromeOS.
4705          */
4706         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4707             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4708             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4709             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4710                 return -EINVAL;
4711
4712         scaling_info->src_rect.width = state->src_w >> 16;
4713         if (scaling_info->src_rect.width == 0)
4714                 return -EINVAL;
4715
4716         scaling_info->src_rect.height = state->src_h >> 16;
4717         if (scaling_info->src_rect.height == 0)
4718                 return -EINVAL;
4719
4720         scaling_info->dst_rect.x = state->crtc_x;
4721         scaling_info->dst_rect.y = state->crtc_y;
4722
4723         if (state->crtc_w == 0)
4724                 return -EINVAL;
4725
4726         scaling_info->dst_rect.width = state->crtc_w;
4727
4728         if (state->crtc_h == 0)
4729                 return -EINVAL;
4730
4731         scaling_info->dst_rect.height = state->crtc_h;
4732
4733         /* DRM doesn't specify clipping on destination output. */
4734         scaling_info->clip_rect = scaling_info->dst_rect;
4735
4736         /* Validate scaling per-format with DC plane caps */
4737         if (state->plane && state->plane->dev && state->fb) {
4738                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4739                                              &min_downscale, &max_upscale);
4740         } else {
4741                 min_downscale = 250;
4742                 max_upscale = 16000;
4743         }
4744
4745         scale_w = scaling_info->dst_rect.width * 1000 /
4746                   scaling_info->src_rect.width;
4747
4748         if (scale_w < min_downscale || scale_w > max_upscale)
4749                 return -EINVAL;
4750
4751         scale_h = scaling_info->dst_rect.height * 1000 /
4752                   scaling_info->src_rect.height;
4753
4754         if (scale_h < min_downscale || scale_h > max_upscale)
4755                 return -EINVAL;
4756
4757         /*
4758          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4759          * assume reasonable defaults based on the format.
4760          */
4761
4762         return 0;
4763 }
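
/*
 * Worked example (illustrative): scaling a 1920-wide source to a 960-wide
 * destination gives scale_w = 960 * 1000 / 1920 = 500 (0.5x), which passes
 * the check above only if the format's min_downscale is <= 500.
 */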
4764
4765 static void
4766 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4767                                  uint64_t tiling_flags)
4768 {
4769         /* Fill GFX8 params */
4770         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4771                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4772
4773                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4774                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4775                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4776                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4777                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4778
4779                 /* XXX fix me for VI */
4780                 tiling_info->gfx8.num_banks = num_banks;
4781                 tiling_info->gfx8.array_mode =
4782                                 DC_ARRAY_2D_TILED_THIN1;
4783                 tiling_info->gfx8.tile_split = tile_split;
4784                 tiling_info->gfx8.bank_width = bankw;
4785                 tiling_info->gfx8.bank_height = bankh;
4786                 tiling_info->gfx8.tile_aspect = mtaspect;
4787                 tiling_info->gfx8.tile_mode =
4788                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4789         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4790                         == DC_ARRAY_1D_TILED_THIN1) {
4791                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4792         }
4793
4794         tiling_info->gfx8.pipe_config =
4795                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4796 }
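
/*
 * Note (illustrative): AMDGPU_TILING_GET() extracts a named bitfield from the
 * 64-bit tiling flags, e.g. AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH) pulls
 * the bank-width field; the field definitions live in the UAPI header
 * include/uapi/drm/amdgpu_drm.h.
 */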
4797
4798 static void
4799 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4800                                   union dc_tiling_info *tiling_info)
4801 {
4802         tiling_info->gfx9.num_pipes =
4803                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4804         tiling_info->gfx9.num_banks =
4805                 adev->gfx.config.gb_addr_config_fields.num_banks;
4806         tiling_info->gfx9.pipe_interleave =
4807                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4808         tiling_info->gfx9.num_shader_engines =
4809                 adev->gfx.config.gb_addr_config_fields.num_se;
4810         tiling_info->gfx9.max_compressed_frags =
4811                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4812         tiling_info->gfx9.num_rb_per_se =
4813                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4814         tiling_info->gfx9.shaderEnable = 1;
4815         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4816                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4817 }
4818
4819 static int
4820 validate_dcc(struct amdgpu_device *adev,
4821              const enum surface_pixel_format format,
4822              const enum dc_rotation_angle rotation,
4823              const union dc_tiling_info *tiling_info,
4824              const struct dc_plane_dcc_param *dcc,
4825              const struct dc_plane_address *address,
4826              const struct plane_size *plane_size)
4827 {
4828         struct dc *dc = adev->dm.dc;
4829         struct dc_dcc_surface_param input;
4830         struct dc_surface_dcc_cap output;
4831
4832         memset(&input, 0, sizeof(input));
4833         memset(&output, 0, sizeof(output));
4834
4835         if (!dcc->enable)
4836                 return 0;
4837
4838         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4839             !dc->cap_funcs.get_dcc_compression_cap)
4840                 return -EINVAL;
4841
4842         input.format = format;
4843         input.surface_size.width = plane_size->surface_size.width;
4844         input.surface_size.height = plane_size->surface_size.height;
4845         input.swizzle_mode = tiling_info->gfx9.swizzle;
4846
4847         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4848                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4849         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4850                 input.scan = SCAN_DIRECTION_VERTICAL;
4851
4852         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4853                 return -EINVAL;
4854
4855         if (!output.capable)
4856                 return -EINVAL;
4857
4858         if (dcc->independent_64b_blks == 0 &&
4859             output.grph.rgb.independent_64b_blks != 0)
4860                 return -EINVAL;
4861
4862         return 0;
4863 }
4864
4865 static bool
4866 modifier_has_dcc(uint64_t modifier)
4867 {
4868         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4869 }
4870
4871 static unsigned
4872 modifier_gfx9_swizzle_mode(uint64_t modifier)
4873 {
4874         if (modifier == DRM_FORMAT_MOD_LINEAR)
4875                 return 0;
4876
4877         return AMD_FMT_MOD_GET(TILE, modifier);
4878 }
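
/*
 * Decode example (illustrative): for a modifier built as
 *
 *   AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 *   AMD_FMT_MOD_SET(DCC, 1)
 *
 * modifier_has_dcc() returns true and modifier_gfx9_swizzle_mode() returns
 * AMD_FMT_MOD_TILE_GFX9_64K_S_X, i.e. the hardware swizzle mode.
 */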
4879
4880 static const struct drm_format_info *
4881 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4882 {
4883         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4884 }
4885
4886 static void
4887 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4888                                     union dc_tiling_info *tiling_info,
4889                                     uint64_t modifier)
4890 {
4891         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4892         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4893         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4894         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4895
4896         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4897
4898         if (!IS_AMD_FMT_MOD(modifier))
4899                 return;
4900
4901         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4902         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4903
4904         if (adev->family >= AMDGPU_FAMILY_NV) {
4905                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4906         } else {
4907                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4908
4909                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4910         }
4911 }
4912
4913 enum dm_micro_swizzle {
4914         MICRO_SWIZZLE_Z = 0,
4915         MICRO_SWIZZLE_S = 1,
4916         MICRO_SWIZZLE_D = 2,
4917         MICRO_SWIZZLE_R = 3
4918 };
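
/*
 * Note (illustrative): the two low bits of a GFX9+ swizzle mode encode the
 * micro-tile type, which is why dm_plane_format_mod_supported() below masks
 * modifier_gfx9_swizzle_mode(modifier) with 3 to recover it.
 */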
4919
4920 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4921                                           uint32_t format,
4922                                           uint64_t modifier)
4923 {
4924         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4925         const struct drm_format_info *info = drm_format_info(format);
4926         int i;
4927
4928         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4929
4930         if (!info)
4931                 return false;
4932
4933         /*
4934          * We always have to allow these modifiers:
4935          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4936          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4937          */
4938         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4939             modifier == DRM_FORMAT_MOD_INVALID) {
4940                 return true;
4941         }
4942
4943         /* Check that the modifier is on the list of the plane's supported modifiers. */
4944         for (i = 0; i < plane->modifier_count; i++) {
4945                 if (modifier == plane->modifiers[i])
4946                         break;
4947         }
4948         if (i == plane->modifier_count)
4949                 return false;
4950
4951         /*
4952          * For D swizzle the canonical modifier depends on the bpp, so check
4953          * it here.
4954          */
4955         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4956             adev->family >= AMDGPU_FAMILY_NV) {
4957                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4958                         return false;
4959         }
4960
4961         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4962             info->cpp[0] < 8)
4963                 return false;
4964
4965         if (modifier_has_dcc(modifier)) {
4966                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4967                 if (info->cpp[0] != 4)
4968                         return false;
4969                 /* We support multi-planar formats, but not when combined with
4970                  * additional DCC metadata planes. */
4971                 if (info->num_planes > 1)
4972                         return false;
4973         }
4974
4975         return true;
4976 }
4977
4978 static void
4979 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4980 {
4981         if (!*mods)
4982                 return;
4983
4984         if (*cap - *size < 1) {
4985                 uint64_t new_cap = *cap * 2;
4986                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4987
4988                 if (!new_mods) {
4989                         kfree(*mods);
4990                         *mods = NULL;
4991                         return;
4992                 }
4993
4994                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4995                 kfree(*mods);
4996                 *mods = new_mods;
4997                 *cap = new_cap;
4998         }
4999
5000         (*mods)[*size] = mod;
5001         *size += 1;
5002 }
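
/*
 * Note (illustrative): add_modifier() grows the array geometrically (capacity
 * doubles whenever the array is full) and degrades gracefully on allocation
 * failure by freeing the list and setting *mods to NULL, which subsequent
 * calls treat as a no-op; callers check for NULL at the end instead.
 */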
5003
5004 static void
5005 add_gfx9_modifiers(const struct amdgpu_device *adev,
5006                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
5007 {
5008         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5009         int pipe_xor_bits = min(8, pipes +
5010                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5011         int bank_xor_bits = min(8 - pipe_xor_bits,
5012                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5013         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5014                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5015
5016
5017         if (adev->family == AMDGPU_FAMILY_RV) {
5018                 /* Raven2 and later */
5019                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5020
5021                 /*
5022                  * No _D DCC swizzles yet because we only allow 32bpp, which
5023                  * doesn't support _D on DCN
5024                  */
5025
5026                 if (has_constant_encode) {
5027                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5028                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5029                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5030                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5031                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5032                                     AMD_FMT_MOD_SET(DCC, 1) |
5033                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5034                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5035                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5036                 }
5037
5038                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5039                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5040                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5041                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5042                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5043                             AMD_FMT_MOD_SET(DCC, 1) |
5044                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5045                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5046                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5047
5048                 if (has_constant_encode) {
5049                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5050                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5051                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5052                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5053                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5054                                     AMD_FMT_MOD_SET(DCC, 1) |
5055                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5056                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5057                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5058
5059                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5060                                     AMD_FMT_MOD_SET(RB, rb) |
5061                                     AMD_FMT_MOD_SET(PIPE, pipes));
5062                 }
5063
5064                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5065                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5066                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5067                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5068                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5069                             AMD_FMT_MOD_SET(DCC, 1) |
5070                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5071                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5072                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5073                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5074                             AMD_FMT_MOD_SET(RB, rb) |
5075                             AMD_FMT_MOD_SET(PIPE, pipes));
5076         }
5077
5078         /*
5079          * Only supported for 64bpp on Raven, will be filtered on format in
5080          * dm_plane_format_mod_supported.
5081          */
5082         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5083                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5084                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5085                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5086                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5087
5088         if (adev->family == AMDGPU_FAMILY_RV) {
5089                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5090                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5091                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5092                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5093                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5094         }
5095
5096         /*
5097          * Only supported for 64bpp on Raven, will be filtered on format in
5098          * dm_plane_format_mod_supported.
5099          */
5100         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5101                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5102                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5103
5104         if (adev->family == AMDGPU_FAMILY_RV) {
5105                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5106                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5107                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5108         }
5109 }
5110
5111 static void
5112 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5113                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5114 {
5115         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5116
5117         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5118                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5119                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5120                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5121                     AMD_FMT_MOD_SET(DCC, 1) |
5122                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5123                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5124                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5125
5126         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5127                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5128                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5129                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5130                     AMD_FMT_MOD_SET(DCC, 1) |
5131                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5132                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5133                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5134                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5135
5136         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5137                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5138                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5139                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5140
5141         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5142                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5143                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5144                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5145
5146
5147         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5148         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5149                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5150                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5151
5152         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5153                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5154                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5155 }
5156
5157 static void
5158 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5159                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5160 {
5161         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5162         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5163
5164         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5165                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5166                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5167                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5168                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5169                     AMD_FMT_MOD_SET(DCC, 1) |
5170                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5171                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5172                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5173                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5174
5175         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5176                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5177                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5178                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5179                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5180                     AMD_FMT_MOD_SET(DCC, 1) |
5181                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5182                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5183                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5184
5185         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5186                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5187                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5188                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5189                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5190                     AMD_FMT_MOD_SET(DCC, 1) |
5191                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5192                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5193                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5194                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5195                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5196
5197         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5198                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5199                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5200                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5201                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5202                     AMD_FMT_MOD_SET(DCC, 1) |
5203                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5204                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5205                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5206                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5207
5208         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5209                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5210                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5211                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5212                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5213
5214         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5215                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5216                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5217                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5218                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5219
5220         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5221         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5222                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5223                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5224
5225         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5226                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5227                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5228 }
5229
5230 static int
5231 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5232 {
5233         uint64_t size = 0, capacity = 128;
5234         *mods = NULL;
5235
5236         /* We have not hooked up any pre-GFX9 modifiers. */
5237         if (adev->family < AMDGPU_FAMILY_AI)
5238                 return 0;
5239
5240         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5241
5242         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5243                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5244                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5245                 return *mods ? 0 : -ENOMEM;
5246         }
5247
5248         switch (adev->family) {
5249         case AMDGPU_FAMILY_AI:
5250         case AMDGPU_FAMILY_RV:
5251                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5252                 break;
5253         case AMDGPU_FAMILY_NV:
5254         case AMDGPU_FAMILY_VGH:
5255         case AMDGPU_FAMILY_YC:
5256         case AMDGPU_FAMILY_GC_10_3_6:
5257         case AMDGPU_FAMILY_GC_10_3_7:
5258                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5259                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5260                 else
5261                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5262                 break;
5263         }
5264
5265         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5266
5267         /* INVALID marks the end of the list. */
5268         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5269
5270         if (!*mods)
5271                 return -ENOMEM;
5272
5273         return 0;
5274 }
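
/*
 * Note (illustrative): the returned list is terminated by
 * DRM_FORMAT_MOD_INVALID, as the DRM core's drm_universal_plane_init()
 * requires for its format_modifiers argument; the list is consumed when the
 * plane is created (via amdgpu_dm_plane_init() elsewhere in this file).
 */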
5275
5276 static int
5277 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5278                                           const struct amdgpu_framebuffer *afb,
5279                                           const enum surface_pixel_format format,
5280                                           const enum dc_rotation_angle rotation,
5281                                           const struct plane_size *plane_size,
5282                                           union dc_tiling_info *tiling_info,
5283                                           struct dc_plane_dcc_param *dcc,
5284                                           struct dc_plane_address *address,
5285                                           const bool force_disable_dcc)
5286 {
5287         const uint64_t modifier = afb->base.modifier;
5288         int ret = 0;
5289
5290         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5291         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5292
5293         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5294                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5295                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5296                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5297
5298                 dcc->enable = 1;
5299                 dcc->meta_pitch = afb->base.pitches[1];
5300                 dcc->independent_64b_blks = independent_64b_blks;
5301                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5302                         if (independent_64b_blks && independent_128b_blks)
5303                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5304                         else if (independent_128b_blks)
5305                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5306                         else if (independent_64b_blks && !independent_128b_blks)
5307                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5308                         else
5309                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5310                 } else {
5311                         if (independent_64b_blks)
5312                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5313                         else
5314                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5315                 }
5316
5317                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5318                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5319         }
5320
5321         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5322         if (ret)
5323                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5324
5325         return ret;
5326 }
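
/*
 * Summary of the DCC independent-block selection above (illustrative):
 *
 *   GFX10_RBPLUS:  64B && 128B -> hubp_ind_block_64b_no_128bcl
 *                  128B only   -> hubp_ind_block_128b
 *                  64B only    -> hubp_ind_block_64b
 *                  neither     -> hubp_ind_block_unconstrained
 *   older tiling:  64B         -> hubp_ind_block_64b
 *                  otherwise   -> hubp_ind_block_unconstrained
 */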
5327
5328 static int
5329 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5330                              const struct amdgpu_framebuffer *afb,
5331                              const enum surface_pixel_format format,
5332                              const enum dc_rotation_angle rotation,
5333                              const uint64_t tiling_flags,
5334                              union dc_tiling_info *tiling_info,
5335                              struct plane_size *plane_size,
5336                              struct dc_plane_dcc_param *dcc,
5337                              struct dc_plane_address *address,
5338                              bool tmz_surface,
5339                              bool force_disable_dcc)
5340 {
5341         const struct drm_framebuffer *fb = &afb->base;
5342         int ret;
5343
5344         memset(tiling_info, 0, sizeof(*tiling_info));
5345         memset(plane_size, 0, sizeof(*plane_size));
5346         memset(dcc, 0, sizeof(*dcc));
5347         memset(address, 0, sizeof(*address));
5348
5349         address->tmz_surface = tmz_surface;
5350
5351         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5352                 uint64_t addr = afb->address + fb->offsets[0];
5353
5354                 plane_size->surface_size.x = 0;
5355                 plane_size->surface_size.y = 0;
5356                 plane_size->surface_size.width = fb->width;
5357                 plane_size->surface_size.height = fb->height;
5358                 plane_size->surface_pitch =
5359                         fb->pitches[0] / fb->format->cpp[0];
5360
5361                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5362                 address->grph.addr.low_part = lower_32_bits(addr);
5363                 address->grph.addr.high_part = upper_32_bits(addr);
5364         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5365                 uint64_t luma_addr = afb->address + fb->offsets[0];
5366                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5367
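
                /*
                 * Semi-planar YUV (e.g. NV12/P010): plane 0 holds
                 * full-resolution luma, plane 1 holds interleaved chroma at
                 * half the width and height, which is why the chroma size
                 * below is fb->width / 2 x fb->height / 2 and the chroma
                 * pitch is derived from pitches[1] / cpp[1].
                 */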
5368                 plane_size->surface_size.x = 0;
5369                 plane_size->surface_size.y = 0;
5370                 plane_size->surface_size.width = fb->width;
5371                 plane_size->surface_size.height = fb->height;
5372                 plane_size->surface_pitch =
5373                         fb->pitches[0] / fb->format->cpp[0];
5374
5375                 plane_size->chroma_size.x = 0;
5376                 plane_size->chroma_size.y = 0;
5377                 /* TODO: set these based on surface format */
5378                 plane_size->chroma_size.width = fb->width / 2;
5379                 plane_size->chroma_size.height = fb->height / 2;
5380
5381                 plane_size->chroma_pitch =
5382                         fb->pitches[1] / fb->format->cpp[1];
5383
5384                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5385                 address->video_progressive.luma_addr.low_part =
5386                         lower_32_bits(luma_addr);
5387                 address->video_progressive.luma_addr.high_part =
5388                         upper_32_bits(luma_addr);
5389                 address->video_progressive.chroma_addr.low_part =
5390                         lower_32_bits(chroma_addr);
5391                 address->video_progressive.chroma_addr.high_part =
5392                         upper_32_bits(chroma_addr);
5393         }
5394
5395         if (adev->family >= AMDGPU_FAMILY_AI) {
5396                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5397                                                                 rotation, plane_size,
5398                                                                 tiling_info, dcc,
5399                                                                 address,
5400                                                                 force_disable_dcc);
5401                 if (ret)
5402                         return ret;
5403         } else {
5404                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5405         }
5406
5407         return 0;
5408 }
5409
5410 static void
5411 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5412                                bool *per_pixel_alpha, bool *global_alpha,
5413                                int *global_alpha_value)
5414 {
5415         *per_pixel_alpha = false;
5416         *global_alpha = false;
5417         *global_alpha_value = 0xff;
5418
5419         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5420                 return;
5421
5422         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5423                 static const uint32_t alpha_formats[] = {
5424                         DRM_FORMAT_ARGB8888,
5425                         DRM_FORMAT_RGBA8888,
5426                         DRM_FORMAT_ABGR8888,
5427                 };
5428                 uint32_t format = plane_state->fb->format->format;
5429                 unsigned int i;
5430
5431                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5432                         if (format == alpha_formats[i]) {
5433                                 *per_pixel_alpha = true;
5434                                 break;
5435                         }
5436                 }
5437         }
5438
5439         if (plane_state->alpha < 0xffff) {
5440                 *global_alpha = true;
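                /*
                 * DRM plane alpha is 16 bit (0x0000-0xffff); DC takes an
                 * 8-bit value, so keep the high byte (e.g. 0x8080 -> 0x80).
                 */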
5441                 *global_alpha_value = plane_state->alpha >> 8;
5442         }
5443 }
5444
5445 static int
5446 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5447                             const enum surface_pixel_format format,
5448                             enum dc_color_space *color_space)
5449 {
5450         bool full_range;
5451
5452         *color_space = COLOR_SPACE_SRGB;
5453
5454         /* DRM color properties only affect non-RGB formats. */
5455         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5456                 return 0;
5457
5458         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5459
5460         switch (plane_state->color_encoding) {
5461         case DRM_COLOR_YCBCR_BT601:
5462                 if (full_range)
5463                         *color_space = COLOR_SPACE_YCBCR601;
5464                 else
5465                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5466                 break;
5467
5468         case DRM_COLOR_YCBCR_BT709:
5469                 if (full_range)
5470                         *color_space = COLOR_SPACE_YCBCR709;
5471                 else
5472                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5473                 break;
5474
5475         case DRM_COLOR_YCBCR_BT2020:
5476                 if (full_range)
5477                         *color_space = COLOR_SPACE_2020_YCBCR;
5478                 else
5479                         return -EINVAL;
5480                 break;
5481
5482         default:
5483                 return -EINVAL;
5484         }
5485
5486         return 0;
5487 }
5488
5489 static int
5490 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5491                             const struct drm_plane_state *plane_state,
5492                             const uint64_t tiling_flags,
5493                             struct dc_plane_info *plane_info,
5494                             struct dc_plane_address *address,
5495                             bool tmz_surface,
5496                             bool force_disable_dcc)
5497 {
5498         const struct drm_framebuffer *fb = plane_state->fb;
5499         const struct amdgpu_framebuffer *afb =
5500                 to_amdgpu_framebuffer(plane_state->fb);
5501         int ret;
5502
5503         memset(plane_info, 0, sizeof(*plane_info));
5504
5505         switch (fb->format->format) {
5506         case DRM_FORMAT_C8:
5507                 plane_info->format =
5508                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5509                 break;
5510         case DRM_FORMAT_RGB565:
5511                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5512                 break;
5513         case DRM_FORMAT_XRGB8888:
5514         case DRM_FORMAT_ARGB8888:
5515                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5516                 break;
5517         case DRM_FORMAT_XRGB2101010:
5518         case DRM_FORMAT_ARGB2101010:
5519                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5520                 break;
5521         case DRM_FORMAT_XBGR2101010:
5522         case DRM_FORMAT_ABGR2101010:
5523                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5524                 break;
5525         case DRM_FORMAT_XBGR8888:
5526         case DRM_FORMAT_ABGR8888:
5527                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5528                 break;
5529         case DRM_FORMAT_NV21:
5530                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5531                 break;
5532         case DRM_FORMAT_NV12:
5533                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5534                 break;
5535         case DRM_FORMAT_P010:
5536                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5537                 break;
5538         case DRM_FORMAT_XRGB16161616F:
5539         case DRM_FORMAT_ARGB16161616F:
5540                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5541                 break;
5542         case DRM_FORMAT_XBGR16161616F:
5543         case DRM_FORMAT_ABGR16161616F:
5544                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5545                 break;
5546         case DRM_FORMAT_XRGB16161616:
5547         case DRM_FORMAT_ARGB16161616:
5548                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5549                 break;
5550         case DRM_FORMAT_XBGR16161616:
5551         case DRM_FORMAT_ABGR16161616:
5552                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5553                 break;
5554         default:
5555                 DRM_ERROR(
5556                         "Unsupported screen format %p4cc\n",
5557                         &fb->format->format);
5558                 return -EINVAL;
5559         }
5560
5561         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5562         case DRM_MODE_ROTATE_0:
5563                 plane_info->rotation = ROTATION_ANGLE_0;
5564                 break;
5565         case DRM_MODE_ROTATE_90:
5566                 plane_info->rotation = ROTATION_ANGLE_90;
5567                 break;
5568         case DRM_MODE_ROTATE_180:
5569                 plane_info->rotation = ROTATION_ANGLE_180;
5570                 break;
5571         case DRM_MODE_ROTATE_270:
5572                 plane_info->rotation = ROTATION_ANGLE_270;
5573                 break;
5574         default:
5575                 plane_info->rotation = ROTATION_ANGLE_0;
5576                 break;
5577         }
5578
5579         plane_info->visible = true;
5580         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5581
5582         plane_info->layer_index = 0;
5583
5584         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5585                                           &plane_info->color_space);
5586         if (ret)
5587                 return ret;
5588
5589         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5590                                            plane_info->rotation, tiling_flags,
5591                                            &plane_info->tiling_info,
5592                                            &plane_info->plane_size,
5593                                            &plane_info->dcc, address, tmz_surface,
5594                                            force_disable_dcc);
5595         if (ret)
5596                 return ret;
5597
5598         fill_blending_from_plane_state(
5599                 plane_state, &plane_info->per_pixel_alpha,
5600                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5601
5602         return 0;
5603 }
5604
5605 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5606                                     struct dc_plane_state *dc_plane_state,
5607                                     struct drm_plane_state *plane_state,
5608                                     struct drm_crtc_state *crtc_state)
5609 {
5610         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5611         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5612         struct dc_scaling_info scaling_info;
5613         struct dc_plane_info plane_info;
5614         int ret;
5615         bool force_disable_dcc = false;
5616
5617         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5618         if (ret)
5619                 return ret;
5620
5621         dc_plane_state->src_rect = scaling_info.src_rect;
5622         dc_plane_state->dst_rect = scaling_info.dst_rect;
5623         dc_plane_state->clip_rect = scaling_info.clip_rect;
5624         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5625
5626         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5627         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5628                                           afb->tiling_flags,
5629                                           &plane_info,
5630                                           &dc_plane_state->address,
5631                                           afb->tmz_surface,
5632                                           force_disable_dcc);
5633         if (ret)
5634                 return ret;
5635
5636         dc_plane_state->format = plane_info.format;
5637         dc_plane_state->color_space = plane_info.color_space;
5639         dc_plane_state->plane_size = plane_info.plane_size;
5640         dc_plane_state->rotation = plane_info.rotation;
5641         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5642         dc_plane_state->stereo_format = plane_info.stereo_format;
5643         dc_plane_state->tiling_info = plane_info.tiling_info;
5644         dc_plane_state->visible = plane_info.visible;
5645         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5646         dc_plane_state->global_alpha = plane_info.global_alpha;
5647         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5648         dc_plane_state->dcc = plane_info.dcc;
5649         dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
5650         dc_plane_state->flip_int_enabled = true;
5651
5652         /*
5653          * Always set input transfer function, since plane state is refreshed
5654          * every time.
5655          */
5656         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5657         if (ret)
5658                 return ret;
5659
5660         return 0;
5661 }
5662
5663 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5664                                            const struct dm_connector_state *dm_state,
5665                                            struct dc_stream_state *stream)
5666 {
5667         enum amdgpu_rmx_type rmx_type;
5668
5669         struct rect src = { 0 }; /* viewport in composition space */
5670         struct rect dst = { 0 }; /* stream addressable area */
5671
5672         /* no mode. nothing to be done */
5673         if (!mode)
5674                 return;
5675
5676         /* Full screen scaling by default */
5677         src.width = mode->hdisplay;
5678         src.height = mode->vdisplay;
5679         dst.width = stream->timing.h_addressable;
5680         dst.height = stream->timing.v_addressable;
5681
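        /*
         * Aspect-preserving scaling shrinks the destination rectangle on one
         * axis so src and dst keep the same ratio; e.g. a 1280x1024 source
         * on a 1920x1080 display yields a centered 1350x1080 dst
         * (1280 * 1080 / 1024 = 1350).
         */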
5682         if (dm_state) {
5683                 rmx_type = dm_state->scaling;
5684                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5685                         if (src.width * dst.height <
5686                                         src.height * dst.width) {
5687                                 /* height needs less upscaling/more downscaling */
5688                                 dst.width = src.width *
5689                                                 dst.height / src.height;
5690                         } else {
5691                                 /* width needs less upscaling/more downscaling */
5692                                 dst.height = src.height *
5693                                                 dst.width / src.width;
5694                         }
5695                 } else if (rmx_type == RMX_CENTER) {
5696                         dst = src;
5697                 }
5698
5699                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5700                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5701
5702                 if (dm_state->underscan_enable) {
5703                         dst.x += dm_state->underscan_hborder / 2;
5704                         dst.y += dm_state->underscan_vborder / 2;
5705                         dst.width -= dm_state->underscan_hborder;
5706                         dst.height -= dm_state->underscan_vborder;
5707                 }
5708         }
5709
5710         stream->src = src;
5711         stream->dst = dst;
5712
5713         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5714                       dst.x, dst.y, dst.width, dst.height);
5716 }
5717
5718 static enum dc_color_depth
5719 convert_color_depth_from_display_info(const struct drm_connector *connector,
5720                                       bool is_y420, int requested_bpc)
5721 {
5722         uint8_t bpc;
5723
5724         if (is_y420) {
5725                 bpc = 8;
5726
5727                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5728                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5729                         bpc = 16;
5730                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5731                         bpc = 12;
5732                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5733                         bpc = 10;
5734         } else {
5735                 bpc = (uint8_t)connector->display_info.bpc;
5736                 /* Assume 8 bpc by default if no bpc is specified. */
5737                 bpc = bpc ? bpc : 8;
5738         }
5739
5740         if (requested_bpc > 0) {
5741                 /*
5742                  * Cap display bpc based on the user requested value.
5743                  *
5744                  * The value of state->max_bpc may not be correctly updated
5745                  * depending on when the connector gets added to the state
5746                  * or if this was called outside of atomic check, so it
5747                  * can't be used directly.
5748                  */
5749                 bpc = min_t(u8, bpc, requested_bpc);
5750
5751                 /* Round down to the nearest even number. */
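                /* e.g. an 11 bpc cap yields 10 bpc. */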
5752                 bpc = bpc - (bpc & 1);
5753         }
5754
5755         switch (bpc) {
5756         case 0:
5757                 /*
5758                  * Temporary workaround: DRM doesn't parse color depth for
5759                  * EDID revisions before 1.4.
5760                  * TODO: Fix EDID parsing.
5761                  */
5762                 return COLOR_DEPTH_888;
5763         case 6:
5764                 return COLOR_DEPTH_666;
5765         case 8:
5766                 return COLOR_DEPTH_888;
5767         case 10:
5768                 return COLOR_DEPTH_101010;
5769         case 12:
5770                 return COLOR_DEPTH_121212;
5771         case 14:
5772                 return COLOR_DEPTH_141414;
5773         case 16:
5774                 return COLOR_DEPTH_161616;
5775         default:
5776                 return COLOR_DEPTH_UNDEFINED;
5777         }
5778 }
5779
5780 static enum dc_aspect_ratio
5781 get_aspect_ratio(const struct drm_display_mode *mode_in)
5782 {
5783         /* 1-1 mapping, since both enums follow the HDMI spec. */
5784         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5785 }
5786
5787 static enum dc_color_space
5788 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5789 {
5790         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5791
5792         switch (dc_crtc_timing->pixel_encoding) {
5793         case PIXEL_ENCODING_YCBCR422:
5794         case PIXEL_ENCODING_YCBCR444:
5795         case PIXEL_ENCODING_YCBCR420:
5796         {
5797                 /*
5798                  * Per the HDMI spec, 27030 kHz is the separation point
5799                  * between HDTV and SDTV; use YCbCr709 above it and
5800                  * YCbCr601 below it.
5801                  */
5802                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5803                         if (dc_crtc_timing->flags.Y_ONLY)
5804                                 color_space =
5805                                         COLOR_SPACE_YCBCR709_LIMITED;
5806                         else
5807                                 color_space = COLOR_SPACE_YCBCR709;
5808                 } else {
5809                         if (dc_crtc_timing->flags.Y_ONLY)
5810                                 color_space =
5811                                         COLOR_SPACE_YCBCR601_LIMITED;
5812                         else
5813                                 color_space = COLOR_SPACE_YCBCR601;
5814                 }
5815
5816         }
5817         break;
5818         case PIXEL_ENCODING_RGB:
5819                 color_space = COLOR_SPACE_SRGB;
5820                 break;
5821
5822         default:
5823                 WARN_ON(1);
5824                 break;
5825         }
5826
5827         return color_space;
5828 }
5829
5830 static bool adjust_colour_depth_from_display_info(
5831         struct dc_crtc_timing *timing_out,
5832         const struct drm_display_info *info)
5833 {
5834         enum dc_color_depth depth = timing_out->display_color_depth;
5835         int normalized_clk;
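
        /*
         * Worked example: a 4k60 mode has pix_clk_100hz == 5940000, i.e. a
         * normalized clock of 594000 kHz. At 12 bpc that scales to
         * 594000 * 36 / 24 = 891000 kHz; if the sink's max_tmds_clock is
         * 600000 kHz, the loop steps the depth down until it fits (here,
         * 8 bpc at 594000 kHz).
         */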
5836         do {
5837                 normalized_clk = timing_out->pix_clk_100hz / 10;
5838                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5839                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5840                         normalized_clk /= 2;
5841                 /* Adjust the pixel clock per the HDMI spec, based on colour depth. */
5842                 switch (depth) {
5843                 case COLOR_DEPTH_888:
5844                         break;
5845                 case COLOR_DEPTH_101010:
5846                         normalized_clk = (normalized_clk * 30) / 24;
5847                         break;
5848                 case COLOR_DEPTH_121212:
5849                         normalized_clk = (normalized_clk * 36) / 24;
5850                         break;
5851                 case COLOR_DEPTH_161616:
5852                         normalized_clk = (normalized_clk * 48) / 24;
5853                         break;
5854                 default:
5855                         /* The above depths are the only ones valid for HDMI. */
5856                         return false;
5857                 }
5858                 if (normalized_clk <= info->max_tmds_clock) {
5859                         timing_out->display_color_depth = depth;
5860                         return true;
5861                 }
5862         } while (--depth > COLOR_DEPTH_666);
5863         return false;
5864 }
5865
5866 static void fill_stream_properties_from_drm_display_mode(
5867         struct dc_stream_state *stream,
5868         const struct drm_display_mode *mode_in,
5869         const struct drm_connector *connector,
5870         const struct drm_connector_state *connector_state,
5871         const struct dc_stream_state *old_stream,
5872         int requested_bpc)
5873 {
5874         struct dc_crtc_timing *timing_out = &stream->timing;
5875         const struct drm_display_info *info = &connector->display_info;
5876         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5877         struct hdmi_vendor_infoframe hv_frame;
5878         struct hdmi_avi_infoframe avi_frame;
5879
5880         memset(&hv_frame, 0, sizeof(hv_frame));
5881         memset(&avi_frame, 0, sizeof(avi_frame));
5882
5883         timing_out->h_border_left = 0;
5884         timing_out->h_border_right = 0;
5885         timing_out->v_border_top = 0;
5886         timing_out->v_border_bottom = 0;
5887         /* TODO: un-hardcode */
5888         if (drm_mode_is_420_only(info, mode_in)
5889                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5890                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5891         else if (drm_mode_is_420_also(info, mode_in)
5892                         && aconnector->force_yuv420_output)
5893                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5894         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5895                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5896                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5897         else
5898                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5899
5900         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5901         timing_out->display_color_depth = convert_color_depth_from_display_info(
5902                 connector,
5903                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5904                 requested_bpc);
5905         timing_out->scan_type = SCANNING_TYPE_NODATA;
5906         timing_out->hdmi_vic = 0;
5907
5908         if (old_stream) {
5909                 timing_out->vic = old_stream->timing.vic;
5910                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5911                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5912         } else {
5913                 timing_out->vic = drm_match_cea_mode(mode_in);
5914                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5915                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5916                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5917                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5918         }
5919
5920         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5921                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5922                 timing_out->vic = avi_frame.video_code;
5923                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5924                 timing_out->hdmi_vic = hv_frame.vic;
5925         }
5926
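        /*
         * FreeSync video modes are read from the base timing fields;
         * everything else uses the crtc_* fields that
         * drm_mode_set_crtcinfo() prepared.
         */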
5927         if (is_freesync_video_mode(mode_in, aconnector)) {
5928                 timing_out->h_addressable = mode_in->hdisplay;
5929                 timing_out->h_total = mode_in->htotal;
5930                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5931                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5932                 timing_out->v_total = mode_in->vtotal;
5933                 timing_out->v_addressable = mode_in->vdisplay;
5934                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5935                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5936                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5937         } else {
5938                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5939                 timing_out->h_total = mode_in->crtc_htotal;
5940                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5941                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5942                 timing_out->v_total = mode_in->crtc_vtotal;
5943                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5944                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5945                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5946                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5947         }
5948
5949         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5950
5951         stream->output_color_space = get_output_color_space(timing_out);
5952
5953         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5954         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5955         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5956                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5957                     drm_mode_is_420_also(info, mode_in) &&
5958                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5959                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5960                         adjust_colour_depth_from_display_info(timing_out, info);
5961                 }
5962         }
5963 }
5964
5965 static void fill_audio_info(struct audio_info *audio_info,
5966                             const struct drm_connector *drm_connector,
5967                             const struct dc_sink *dc_sink)
5968 {
5969         int i = 0;
5970         int cea_revision = 0;
5971         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5972
5973         audio_info->manufacture_id = edid_caps->manufacturer_id;
5974         audio_info->product_id = edid_caps->product_id;
5975
5976         cea_revision = drm_connector->display_info.cea_rev;
5977
5978         strscpy(audio_info->display_name,
5979                 edid_caps->display_name,
5980                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5981
5982         if (cea_revision >= 3) {
5983                 audio_info->mode_count = edid_caps->audio_mode_count;
5984
5985                 for (i = 0; i < audio_info->mode_count; ++i) {
5986                         audio_info->modes[i].format_code =
5987                                         (enum audio_format_code)
5988                                         (edid_caps->audio_modes[i].format_code);
5989                         audio_info->modes[i].channel_count =
5990                                         edid_caps->audio_modes[i].channel_count;
5991                         audio_info->modes[i].sample_rates.all =
5992                                         edid_caps->audio_modes[i].sample_rate;
5993                         audio_info->modes[i].sample_size =
5994                                         edid_caps->audio_modes[i].sample_size;
5995                 }
5996         }
5997
5998         audio_info->flags.all = edid_caps->speaker_flags;
5999
6000         /* TODO: We only check the progressive mode; also check the interlaced mode. */
6001         if (drm_connector->latency_present[0]) {
6002                 audio_info->video_latency = drm_connector->video_latency[0];
6003                 audio_info->audio_latency = drm_connector->audio_latency[0];
6004         }
6005
6006         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6007
6008 }
6009
6010 static void
6011 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6012                                       struct drm_display_mode *dst_mode)
6013 {
6014         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6015         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6016         dst_mode->crtc_clock = src_mode->crtc_clock;
6017         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6018         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6019         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6020         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6021         dst_mode->crtc_htotal = src_mode->crtc_htotal;
6022         dst_mode->crtc_hskew = src_mode->crtc_hskew;
6023         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6024         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6025         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6026         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6027         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6028 }
6029
6030 static void
6031 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6032                                         const struct drm_display_mode *native_mode,
6033                                         bool scale_enabled)
6034 {
6035         if (scale_enabled) {
6036                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6037         } else if (native_mode->clock == drm_mode->clock &&
6038                         native_mode->htotal == drm_mode->htotal &&
6039                         native_mode->vtotal == drm_mode->vtotal) {
6040                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6041         } else {
6042                 /* No scaling and not an amdgpu-inserted mode: nothing to patch. */
6043         }
6044 }
6045
6046 static struct dc_sink *
6047 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6048 {
6049         struct dc_sink_init_data sink_init_data = { 0 };
6050         struct dc_sink *sink = NULL;

6051         sink_init_data.link = aconnector->dc_link;
6052         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6053
6054         sink = dc_sink_create(&sink_init_data);
6055         if (!sink) {
6056                 DRM_ERROR("Failed to create sink!\n");
6057                 return NULL;
6058         }
6059         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6060
6061         return sink;
6062 }
6063
6064 static void set_multisync_trigger_params(
6065                 struct dc_stream_state *stream)
6066 {
6067         struct dc_stream_state *master = NULL;
6068
6069         if (stream->triggered_crtc_reset.enabled) {
6070                 master = stream->triggered_crtc_reset.event_source;
6071                 stream->triggered_crtc_reset.event =
6072                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6073                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6074                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6075         }
6076 }
6077
6078 static void set_master_stream(struct dc_stream_state *stream_set[],
6079                               int stream_count)
6080 {
6081         int j, highest_rfr = 0, master_stream = 0;
6082
6083         for (j = 0;  j < stream_count; j++) {
6084                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6085                         int refresh_rate = 0;
6086
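                        /*
                         * refresh = pixel clock / (h_total * v_total);
                         * e.g. 148500000 Hz / (2200 * 1125) = 60 Hz.
                         */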
6087                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6088                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6089                         if (refresh_rate > highest_rfr) {
6090                                 highest_rfr = refresh_rate;
6091                                 master_stream = j;
6092                         }
6093                 }
6094         }
6095         for (j = 0;  j < stream_count; j++) {
6096                 if (stream_set[j])
6097                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6098         }
6099 }
6100
6101 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6102 {
6103         int i = 0;
6104         struct dc_stream_state *stream;
6105
6106         if (context->stream_count < 2)
6107                 return;
6108         for (i = 0; i < context->stream_count; i++) {
6109                 if (!context->streams[i])
6110                         continue;
6111                 /*
6112                  * TODO: add a function to read AMD VSDB bits and set
6113                  * crtc_sync_master.multi_sync_enabled flag
6114                  * For now it's set to false
6115                  */
6116         }
6117
6118         set_master_stream(context->streams, context->stream_count);
6119
6120         for (i = 0; i < context->stream_count; i++) {
6121                 stream = context->streams[i];
6122
6123                 if (!stream)
6124                         continue;
6125
6126                 set_multisync_trigger_params(stream);
6127         }
6128 }
6129
6130 #if defined(CONFIG_DRM_AMD_DC_DCN)
6131 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6132                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6133                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6134 {
6135         stream->timing.flags.DSC = 0;
6136         dsc_caps->is_dsc_supported = false;
6137
6138         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6139                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6140                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6141                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6142                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6143                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6144                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6145                                 dsc_caps);
6146         }
6147 }
6148
6149 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6150                                     struct dc_sink *sink, struct dc_stream_state *stream,
6151                                     struct dsc_dec_dpcd_caps *dsc_caps,
6152                                     uint32_t max_dsc_target_bpp_limit_override)
6153 {
6154         const struct dc_link_settings *verified_link_cap = NULL;
6155         uint32_t link_bw_in_kbps;
6156         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6157         struct dc *dc = sink->ctx->dc;
6158         struct dc_dsc_bw_range bw_range = {0};
6159         struct dc_dsc_config dsc_cfg = {0};
6160
6161         verified_link_cap = dc_link_get_link_cap(stream->link);
6162         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
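        /* DSC target bpp is in 1/16 bit units, so 8 * 16 represents 8.0 bpp. */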
6163         edp_min_bpp_x16 = 8 * 16;
6164         edp_max_bpp_x16 = 8 * 16;
6165
6166         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6167                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6168
6169         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6170                 edp_min_bpp_x16 = edp_max_bpp_x16;
6171
6172         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6173                                 dc->debug.dsc_min_slice_height_override,
6174                                 edp_min_bpp_x16, edp_max_bpp_x16,
6175                                 dsc_caps,
6176                                 &stream->timing,
6177                                 &bw_range)) {
6178
6179                 if (bw_range.max_kbps < link_bw_in_kbps) {
6180                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6181                                         dsc_caps,
6182                                         dc->debug.dsc_min_slice_height_override,
6183                                         max_dsc_target_bpp_limit_override,
6184                                         0,
6185                                         &stream->timing,
6186                                         &dsc_cfg)) {
6187                                 stream->timing.dsc_cfg = dsc_cfg;
6188                                 stream->timing.flags.DSC = 1;
6189                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6190                         }
6191                         return;
6192                 }
6193         }
6194
6195         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6196                                 dsc_caps,
6197                                 dc->debug.dsc_min_slice_height_override,
6198                                 max_dsc_target_bpp_limit_override,
6199                                 link_bw_in_kbps,
6200                                 &stream->timing,
6201                                 &dsc_cfg)) {
6202                 stream->timing.dsc_cfg = dsc_cfg;
6203                 stream->timing.flags.DSC = 1;
6204         }
6205 }
6206
6207 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6208                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6209                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6210 {
6211         struct drm_connector *drm_connector = &aconnector->base;
6212         uint32_t link_bandwidth_kbps;
6213         uint32_t max_dsc_target_bpp_limit_override = 0;
6214         struct dc *dc = sink->ctx->dc;
6215         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6216         uint32_t dsc_max_supported_bw_in_kbps;
6217
6218         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6219                                                         dc_link_get_link_cap(aconnector->dc_link));
6220
6221         if (stream->link && stream->link->local_sink)
6222                 max_dsc_target_bpp_limit_override =
6223                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6224
6225         /* Set DSC policy according to dsc_clock_en */
6226         dc_dsc_policy_set_enable_dsc_when_not_needed(
6227                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6228
6229         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6230             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6231
6232                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6233
6234         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6235                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6236                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6237                                                 dsc_caps,
6238                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6239                                                 max_dsc_target_bpp_limit_override,
6240                                                 link_bandwidth_kbps,
6241                                                 &stream->timing,
6242                                                 &stream->timing.dsc_cfg)) {
6243                                 stream->timing.flags.DSC = 1;
6244                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6245                                                                  __func__, drm_connector->name);
6246                         }
6247                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6248                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6249                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6250                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6251
6252                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6253                                         max_supported_bw_in_kbps > 0 &&
6254                                         dsc_max_supported_bw_in_kbps > 0)
6255                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6256                                                 dsc_caps,
6257                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6258                                                 max_dsc_target_bpp_limit_override,
6259                                                 dsc_max_supported_bw_in_kbps,
6260                                                 &stream->timing,
6261                                                 &stream->timing.dsc_cfg)) {
6262                                         stream->timing.flags.DSC = 1;
6263                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6264                                                                          __func__, drm_connector->name);
6265                                 }
6266                 }
6267         }
6268
6269         /* Overwrite the stream flag if DSC is enabled through debugfs */
6270         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6271                 stream->timing.flags.DSC = 1;
6272
6273         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6274                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6275
6276         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6277                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6278
6279         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6280                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6281 }
6282 #endif /* CONFIG_DRM_AMD_DC_DCN */
6283
6284 /**
6285  * DOC: FreeSync Video
6286  *
6287  * When a userspace application wants to play a video, the content follows a
6288  * standard format definition that usually specifies the FPS for that format.
6289  * The list below illustrates some video formats and their expected FPS:
6291  *
6292  * - TV/NTSC (23.976 FPS)
6293  * - Cinema (24 FPS)
6294  * - TV/PAL (25 FPS)
6295  * - TV/NTSC (29.97 FPS)
6296  * - TV/NTSC (30 FPS)
6297  * - Cinema HFR (48 FPS)
6298  * - TV/PAL (50 FPS)
6299  * - Commonly used (60 FPS)
6300  * - Multiples of 24 (48,72,96,120 FPS)
6301  *
6302  * The list of standard video formats is not huge and can be added to the
6303  * connector modeset list beforehand. With that, userspace can leverage
6304  * FreeSync to extend the front porch in order to attain the target refresh
6305  * rate. Such a switch will happen seamlessly, without screen blanking or
6306  * reprogramming of the output in any other way. If the userspace requests a
6307  * modesetting change compatible with FreeSync modes that only differ in the
6308  * refresh rate, DC will skip the full update and avoid blink during the
6309  * transition. For example, the video player can change the modesetting from
6310  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6311  * causing any display blink. This same concept can be applied to a mode
6312  * setting change.
6313  */
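
/*
 * Worked example (illustrative numbers): take 1920x1080@60 with a 148.5 MHz
 * pixel clock, htotal 2200 and vtotal 1125. Keeping the pixel clock and
 * horizontal timing fixed, a 30 Hz FreeSync video variant only needs
 * vtotal' = 148500000 / (2200 * 30) = 2250, i.e. 1125 extra lines of
 * vertical front porch. is_freesync_video_mode() below recognizes exactly
 * this shape: same clock and horizontal timing, with the whole vertical
 * delta absorbed by the front porch.
 */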
6314 static struct drm_display_mode *
6315 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6316                           bool use_probed_modes)
6317 {
6318         struct drm_display_mode *m, *m_pref = NULL;
6319         u16 current_refresh, highest_refresh;
6320         struct list_head *list_head = use_probed_modes ?
6321                                                     &aconnector->base.probed_modes :
6322                                                     &aconnector->base.modes;
6323
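        /*
         * Return the cached base mode if one was already computed; the clock
         * is nonzero once drm_mode_copy() below has filled it in.
         */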
6324         if (aconnector->freesync_vid_base.clock != 0)
6325                 return &aconnector->freesync_vid_base;
6326
6327         /* Find the preferred mode */
6328         list_for_each_entry(m, list_head, head) {
6329                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6330                         m_pref = m;
6331                         break;
6332                 }
6333         }
6334
6335         if (!m_pref) {
6336                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6337                 m_pref = list_first_entry_or_null(
6338                         &aconnector->base.modes, struct drm_display_mode, head);
6339                 if (!m_pref) {
6340                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6341                         return NULL;
6342                 }
6343         }
6344
6345         highest_refresh = drm_mode_vrefresh(m_pref);
6346
6347         /*
6348          * Find the mode with highest refresh rate with same resolution.
6349          * For some monitors, preferred mode is not the mode with highest
6350          * supported refresh rate.
6351          */
6352         list_for_each_entry(m, list_head, head) {
6353                 current_refresh  = drm_mode_vrefresh(m);
6354
6355                 if (m->hdisplay == m_pref->hdisplay &&
6356                     m->vdisplay == m_pref->vdisplay &&
6357                     highest_refresh < current_refresh) {
6358                         highest_refresh = current_refresh;
6359                         m_pref = m;
6360                 }
6361         }
6362
6363         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6364         return m_pref;
6365 }
6366
6367 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6368                                    struct amdgpu_dm_connector *aconnector)
6369 {
6370         struct drm_display_mode *high_mode;
6371         int timing_diff;
6372
6373         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6374         if (!high_mode || !mode)
6375                 return false;
6376
6377         timing_diff = high_mode->vtotal - mode->vtotal;
6378
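        /*
         * A FreeSync video mode must match the base mode in everything but
         * vtotal, and the whole vtotal delta must sit in the vertical front
         * porch, i.e. vsync_start and vsync_end shift by exactly timing_diff.
         */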
6379         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6380             high_mode->hdisplay != mode->hdisplay ||
6381             high_mode->vdisplay != mode->vdisplay ||
6382             high_mode->hsync_start != mode->hsync_start ||
6383             high_mode->hsync_end != mode->hsync_end ||
6384             high_mode->htotal != mode->htotal ||
6385             high_mode->hskew != mode->hskew ||
6386             high_mode->vscan != mode->vscan ||
6387             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6388             high_mode->vsync_end - mode->vsync_end != timing_diff)
6389                 return false;
6390         else
6391                 return true;
6392 }
6393
6394 static struct dc_stream_state *
6395 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6396                        const struct drm_display_mode *drm_mode,
6397                        const struct dm_connector_state *dm_state,
6398                        const struct dc_stream_state *old_stream,
6399                        int requested_bpc)
6400 {
6401         struct drm_display_mode *preferred_mode = NULL;
6402         struct drm_connector *drm_connector;
6403         const struct drm_connector_state *con_state =
6404                 dm_state ? &dm_state->base : NULL;
6405         struct dc_stream_state *stream = NULL;
6406         struct drm_display_mode mode = *drm_mode;
6407         struct drm_display_mode saved_mode;
6408         struct drm_display_mode *freesync_mode = NULL;
6409         bool native_mode_found = false;
6410         bool recalculate_timing = false;
6411         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6412         int mode_refresh;
6413         int preferred_refresh = 0;
6414 #if defined(CONFIG_DRM_AMD_DC_DCN)
6415         struct dsc_dec_dpcd_caps dsc_caps;
6416 #endif
6417         struct dc_sink *sink = NULL;
6418
6419         memset(&saved_mode, 0, sizeof(saved_mode));
6420
6421         if (aconnector == NULL) {
6422                 DRM_ERROR("aconnector is NULL!\n");
6423                 return stream;
6424         }
6425
6426         drm_connector = &aconnector->base;
6427
6428         if (!aconnector->dc_sink) {
6429                 sink = create_fake_sink(aconnector);
6430                 if (!sink)
6431                         return stream;
6432         } else {
6433                 sink = aconnector->dc_sink;
6434                 dc_sink_retain(sink);
6435         }
6436
6437         stream = dc_create_stream_for_sink(sink);
6438
6439         if (stream == NULL) {
6440                 DRM_ERROR("Failed to create stream for sink!\n");
6441                 goto finish;
6442         }
6443
6444         stream->dm_stream_context = aconnector;
6445
6446         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6447                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6448
6449         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6450                 /* Search for preferred mode */
6451                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6452                         native_mode_found = true;
6453                         break;
6454                 }
6455         }
6456         if (!native_mode_found)
6457                 preferred_mode = list_first_entry_or_null(
6458                                 &aconnector->base.modes,
6459                                 struct drm_display_mode,
6460                                 head);
6461
6462         mode_refresh = drm_mode_vrefresh(&mode);
6463
6464         if (preferred_mode == NULL) {
6465                 /*
6466                  * This may not be an error, the use case is when we have no
6467                  * usermode calls to reset and set mode upon hotplug. In this
6468                  * case, we call set mode ourselves to restore the previous mode
6469                  * and the mode list may not yet be filled in.
6470                  */
6471                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6472         } else {
6473                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6474                 if (recalculate_timing) {
6475                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6476                         drm_mode_copy(&saved_mode, &mode);
6477                         drm_mode_copy(&mode, freesync_mode);
6478                 } else {
6479                         decide_crtc_timing_for_drm_display_mode(
6480                                 &mode, preferred_mode, scale);
6481
6482                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6483                 }
6484         }
6485
6486         if (recalculate_timing)
6487                 drm_mode_set_crtcinfo(&saved_mode, 0);
6488         else if (!dm_state)
6489                 drm_mode_set_crtcinfo(&mode, 0);
6490
6491         /*
6492          * If scaling is enabled and the refresh rate didn't change,
6493          * copy the VIC and polarities from the old timings.
6494          */
6495         if (!scale || mode_refresh != preferred_refresh)
6496                 fill_stream_properties_from_drm_display_mode(
6497                         stream, &mode, &aconnector->base, con_state, NULL,
6498                         requested_bpc);
6499         else
6500                 fill_stream_properties_from_drm_display_mode(
6501                         stream, &mode, &aconnector->base, con_state, old_stream,
6502                         requested_bpc);
6503
6504 #if defined(CONFIG_DRM_AMD_DC_DCN)
6505         /* SST DSC determination policy */
6506         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6507         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6508                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6509 #endif
6510
6511         update_stream_scaling_settings(&mode, dm_state, stream);
6512
6513         fill_audio_info(
6514                 &stream->audio_info,
6515                 drm_connector,
6516                 sink);
6517
6518         update_stream_signal(stream, sink);
6519
6520         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6521                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6522
6523         if (stream->link->psr_settings.psr_feature_enabled) {
6524                 /*
6525                  * Decide whether the stream supports VSC SDP colorimetry
6526                  * before building the VSC info packet.
6527                  */
6528                 stream->use_vsc_sdp_for_colorimetry = false;
6529                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6530                         stream->use_vsc_sdp_for_colorimetry =
6531                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6532                 } else {
6533                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6534                                 stream->use_vsc_sdp_for_colorimetry = true;
6535                 }
6536                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6537                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6539         }
6540 finish:
6541         dc_sink_release(sink);
6542
6543         return stream;
6544 }
6545
6546 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6547 {
6548         drm_crtc_cleanup(crtc);
6549         kfree(crtc);
6550 }
6551
6552 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6553                                   struct drm_crtc_state *state)
6554 {
6555         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6556
6557         /* TODO: Destroy dc_stream objects once the stream object is flattened */
6558         if (cur->stream)
6559                 dc_stream_release(cur->stream);
6560
6562         __drm_atomic_helper_crtc_destroy_state(state);
6563
6565         kfree(state);
6566 }
6567
6568 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6569 {
6570         struct dm_crtc_state *state;
6571
6572         if (crtc->state)
6573                 dm_crtc_destroy_state(crtc, crtc->state);
6574
6575         state = kzalloc(sizeof(*state), GFP_KERNEL);
6576         if (WARN_ON(!state))
6577                 return;
6578
6579         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6580 }
6581
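/*
 * Duplicate the software CRTC state, taking an extra reference on the
 * backing dc_stream so it stays valid for the lifetime of the copy.
 */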
6582 static struct drm_crtc_state *
6583 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6584 {
6585         struct dm_crtc_state *state, *cur;
6586
6587         if (WARN_ON(!crtc->state))
6588                 return NULL;
6589
6590         cur = to_dm_crtc_state(crtc->state);
6591
6592         state = kzalloc(sizeof(*state), GFP_KERNEL);
6593         if (!state)
6594                 return NULL;
6595
6596         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6597
6598         if (cur->stream) {
6599                 state->stream = cur->stream;
6600                 dc_stream_retain(state->stream);
6601         }
6602
6603         state->active_planes = cur->active_planes;
6604         state->vrr_infopacket = cur->vrr_infopacket;
6605         state->abm_level = cur->abm_level;
6606         state->vrr_supported = cur->vrr_supported;
6607         state->freesync_config = cur->freesync_config;
6608         state->cm_has_degamma = cur->cm_has_degamma;
6609         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6610         state->force_dpms_off = cur->force_dpms_off;
6611         /* TODO Duplicate dc_stream after the stream object is flattened */
6612
6613         return &state->base;
6614 }
6615
6616 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6617 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6618 {
6619         crtc_debugfs_init(crtc);
6620
6621         return 0;
6622 }
6623 #endif
6624
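/*
 * Enable or disable the VUPDATE interrupt for this CRTC through DC.
 * VUPDATE is only needed while vblank interrupts are on and the CRTC
 * is running in a variable refresh rate (VRR) mode.
 */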
6625 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6626 {
6627         enum dc_irq_source irq_source;
6628         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6629         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6630         int rc;
6631
6632         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6633
6634         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6635
6636         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6637                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6638         return rc;
6639 }
6640
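/*
 * Enable or disable the vblank interrupt for this CRTC, keeping the
 * VUPDATE interrupt in sync (it is only needed in VRR mode). On DCN,
 * the remaining vblank/PSR handling is deferred to the vblank control
 * workqueue rather than done here in atomic context.
 */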
6641 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6642 {
6643         enum dc_irq_source irq_source;
6644         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6645         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6646         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6647 #if defined(CONFIG_DRM_AMD_DC_DCN)
6648         struct amdgpu_display_manager *dm = &adev->dm;
6649         struct vblank_control_work *work;
6650 #endif
6651         int rc = 0;
6652
6653         if (enable) {
6654                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6655                 if (amdgpu_dm_vrr_active(acrtc_state))
6656                         rc = dm_set_vupdate_irq(crtc, true);
6657         } else {
6658                 /* vblank irq off -> vupdate irq off */
6659                 rc = dm_set_vupdate_irq(crtc, false);
6660         }
6661
6662         if (rc)
6663                 return rc;
6664
6665         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6666
6667         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6668                 return -EBUSY;
6669
6670         if (amdgpu_in_reset(adev))
6671                 return 0;
6672
6673 #if defined(CONFIG_DRM_AMD_DC_DCN)
6674         if (dm->vblank_control_workqueue) {
6675                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6676                 if (!work)
6677                         return -ENOMEM;
6678
6679                 INIT_WORK(&work->work, vblank_control_worker);
6680                 work->dm = dm;
6681                 work->acrtc = acrtc;
6682                 work->enable = enable;
6683
6684                 if (acrtc_state->stream) {
6685                         dc_stream_retain(acrtc_state->stream);
6686                         work->stream = acrtc_state->stream;
6687                 }
6688
6689                 queue_work(dm->vblank_control_workqueue, &work->work);
6690         }
6691 #endif
6692
6693         return 0;
6694 }
6695
6696 static int dm_enable_vblank(struct drm_crtc *crtc)
6697 {
6698         return dm_set_vblank(crtc, true);
6699 }
6700
6701 static void dm_disable_vblank(struct drm_crtc *crtc)
6702 {
6703         dm_set_vblank(crtc, false);
6704 }
6705
6706 /* Implement only the options currently available for the driver */
6707 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6708         .reset = dm_crtc_reset_state,
6709         .destroy = amdgpu_dm_crtc_destroy,
6710         .set_config = drm_atomic_helper_set_config,
6711         .page_flip = drm_atomic_helper_page_flip,
6712         .atomic_duplicate_state = dm_crtc_duplicate_state,
6713         .atomic_destroy_state = dm_crtc_destroy_state,
6714         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6715         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6716         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6717         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6718         .enable_vblank = dm_enable_vblank,
6719         .disable_vblank = dm_disable_vblank,
6720         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6721 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6722         .late_register = amdgpu_dm_crtc_late_register,
6723 #endif
6724 };
6725
6726 static enum drm_connector_status
6727 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6728 {
6729         bool connected;
6730         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6731
6732         /*
6733          * Notes:
6734          * 1. This interface is NOT called in context of HPD irq.
6735          * 2. This interface *is called* in the context of a user-mode ioctl,
6736          * which makes it a bad place for *any* MST-related activity.
6737          */
6738
6739         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6740             !aconnector->fake_enable)
6741                 connected = (aconnector->dc_sink != NULL);
6742         else
6743                 connected = (aconnector->base.force == DRM_FORCE_ON);
6744
6745         update_subconnector_property(aconnector);
6746
6747         return (connected ? connector_status_connected :
6748                         connector_status_disconnected);
6749 }
6750
6751 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6752                                             struct drm_connector_state *connector_state,
6753                                             struct drm_property *property,
6754                                             uint64_t val)
6755 {
6756         struct drm_device *dev = connector->dev;
6757         struct amdgpu_device *adev = drm_to_adev(dev);
6758         struct dm_connector_state *dm_old_state =
6759                 to_dm_connector_state(connector->state);
6760         struct dm_connector_state *dm_new_state =
6761                 to_dm_connector_state(connector_state);
6762
6763         int ret = -EINVAL;
6764
6765         if (property == dev->mode_config.scaling_mode_property) {
6766                 enum amdgpu_rmx_type rmx_type;
6767
6768                 switch (val) {
6769                 case DRM_MODE_SCALE_CENTER:
6770                         rmx_type = RMX_CENTER;
6771                         break;
6772                 case DRM_MODE_SCALE_ASPECT:
6773                         rmx_type = RMX_ASPECT;
6774                         break;
6775                 case DRM_MODE_SCALE_FULLSCREEN:
6776                         rmx_type = RMX_FULL;
6777                         break;
6778                 case DRM_MODE_SCALE_NONE:
6779                 default:
6780                         rmx_type = RMX_OFF;
6781                         break;
6782                 }
6783
6784                 if (dm_old_state->scaling == rmx_type)
6785                         return 0;
6786
6787                 dm_new_state->scaling = rmx_type;
6788                 ret = 0;
6789         } else if (property == adev->mode_info.underscan_hborder_property) {
6790                 dm_new_state->underscan_hborder = val;
6791                 ret = 0;
6792         } else if (property == adev->mode_info.underscan_vborder_property) {
6793                 dm_new_state->underscan_vborder = val;
6794                 ret = 0;
6795         } else if (property == adev->mode_info.underscan_property) {
6796                 dm_new_state->underscan_enable = val;
6797                 ret = 0;
6798         } else if (property == adev->mode_info.abm_level_property) {
6799                 dm_new_state->abm_level = val;
6800                 ret = 0;
6801         }
6802
6803         return ret;
6804 }
6805
6806 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6807                                             const struct drm_connector_state *state,
6808                                             struct drm_property *property,
6809                                             uint64_t *val)
6810 {
6811         struct drm_device *dev = connector->dev;
6812         struct amdgpu_device *adev = drm_to_adev(dev);
6813         struct dm_connector_state *dm_state =
6814                 to_dm_connector_state(state);
6815         int ret = -EINVAL;
6816
6817         if (property == dev->mode_config.scaling_mode_property) {
6818                 switch (dm_state->scaling) {
6819                 case RMX_CENTER:
6820                         *val = DRM_MODE_SCALE_CENTER;
6821                         break;
6822                 case RMX_ASPECT:
6823                         *val = DRM_MODE_SCALE_ASPECT;
6824                         break;
6825                 case RMX_FULL:
6826                         *val = DRM_MODE_SCALE_FULLSCREEN;
6827                         break;
6828                 case RMX_OFF:
6829                 default:
6830                         *val = DRM_MODE_SCALE_NONE;
6831                         break;
6832                 }
6833                 ret = 0;
6834         } else if (property == adev->mode_info.underscan_hborder_property) {
6835                 *val = dm_state->underscan_hborder;
6836                 ret = 0;
6837         } else if (property == adev->mode_info.underscan_vborder_property) {
6838                 *val = dm_state->underscan_vborder;
6839                 ret = 0;
6840         } else if (property == adev->mode_info.underscan_property) {
6841                 *val = dm_state->underscan_enable;
6842                 ret = 0;
6843         } else if (property == adev->mode_info.abm_level_property) {
6844                 *val = dm_state->abm_level;
6845                 ret = 0;
6846         }
6847
6848         return ret;
6849 }
6850
6851 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6852 {
6853         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6854
6855         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6856 }
6857
6858 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6859 {
6860         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6861         const struct dc_link *link = aconnector->dc_link;
6862         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6863         struct amdgpu_display_manager *dm = &adev->dm;
6864         int i;
6865
6866         /*
6867          * Call only if mst_mgr was initialized before, since it's not done
6868          * for all connector types.
6869          */
6870         if (aconnector->mst_mgr.dev)
6871                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6872
6873 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6874         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6875         for (i = 0; i < dm->num_of_edps; i++) {
6876                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6877                         backlight_device_unregister(dm->backlight_dev[i]);
6878                         dm->backlight_dev[i] = NULL;
6879                 }
6880         }
6881 #endif
6882
6883         if (aconnector->dc_em_sink)
6884                 dc_sink_release(aconnector->dc_em_sink);
6885         aconnector->dc_em_sink = NULL;
6886         if (aconnector->dc_sink)
6887                 dc_sink_release(aconnector->dc_sink);
6888         aconnector->dc_sink = NULL;
6889
6890         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6891         drm_connector_unregister(connector);
6892         drm_connector_cleanup(connector);
6893         if (aconnector->i2c) {
6894                 i2c_del_adapter(&aconnector->i2c->base);
6895                 kfree(aconnector->i2c);
6896         }
6897         kfree(aconnector->dm_dp_aux.aux.name);
6898
6899         kfree(connector);
6900 }
6901
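/*
 * Reset the connector to its default state: scaling off, underscan
 * disabled, 8 bpc max, no MST payload, and (on eDP) the configured
 * ABM level.
 */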
6902 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6903 {
6904         struct dm_connector_state *state =
6905                 to_dm_connector_state(connector->state);
6906
6907         if (connector->state)
6908                 __drm_atomic_helper_connector_destroy_state(connector->state);
6909
6910         kfree(state);
6911
6912         state = kzalloc(sizeof(*state), GFP_KERNEL);
6913
6914         if (state) {
6915                 state->scaling = RMX_OFF;
6916                 state->underscan_enable = false;
6917                 state->underscan_hborder = 0;
6918                 state->underscan_vborder = 0;
6919                 state->base.max_requested_bpc = 8;
6920                 state->vcpi_slots = 0;
6921                 state->pbn = 0;
6922                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6923                         state->abm_level = amdgpu_dm_abm_level;
6924
6925                 __drm_atomic_helper_connector_reset(connector, &state->base);
6926         }
6927 }
6928
6929 struct drm_connector_state *
6930 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6931 {
6932         struct dm_connector_state *state =
6933                 to_dm_connector_state(connector->state);
6934
6935         struct dm_connector_state *new_state =
6936                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6937
6938         if (!new_state)
6939                 return NULL;
6940
6941         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6942
6943         new_state->freesync_capable = state->freesync_capable;
6944         new_state->abm_level = state->abm_level;
6945         new_state->scaling = state->scaling;
6946         new_state->underscan_enable = state->underscan_enable;
6947         new_state->underscan_hborder = state->underscan_hborder;
6948         new_state->underscan_vborder = state->underscan_vborder;
6949         new_state->vcpi_slots = state->vcpi_slots;
6950         new_state->pbn = state->pbn;
6951         return &new_state->base;
6952 }
6953
6954 static int
6955 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6956 {
6957         struct amdgpu_dm_connector *amdgpu_dm_connector =
6958                 to_amdgpu_dm_connector(connector);
6959         int r;
6960
6961         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6962             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6963                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6964                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6965                 if (r)
6966                         return r;
6967         }
6968
6969 #if defined(CONFIG_DEBUG_FS)
6970         connector_debugfs_init(amdgpu_dm_connector);
6971 #endif
6972
6973         return 0;
6974 }
6975
6976 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6977         .reset = amdgpu_dm_connector_funcs_reset,
6978         .detect = amdgpu_dm_connector_detect,
6979         .fill_modes = drm_helper_probe_single_connector_modes,
6980         .destroy = amdgpu_dm_connector_destroy,
6981         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6982         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6983         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6984         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6985         .late_register = amdgpu_dm_connector_late_register,
6986         .early_unregister = amdgpu_dm_connector_unregister
6987 };
6988
6989 static int get_modes(struct drm_connector *connector)
6990 {
6991         return amdgpu_dm_connector_get_modes(connector);
6992 }
6993
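/*
 * Create an emulated (virtual) dc_sink from the connector's override
 * EDID, so that a connector forced on without a physical display still
 * has a sink to validate modes against.
 */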
6994 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6995 {
6996         struct dc_sink_init_data init_params = {
6997                         .link = aconnector->dc_link,
6998                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6999         };
7000         struct edid *edid;
7001
7002         if (!aconnector->base.edid_blob_ptr) {
7003                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7004                                 aconnector->base.name);
7005
7006                 aconnector->base.force = DRM_FORCE_OFF;
7007                 aconnector->base.override_edid = false;
7008                 return;
7009         }
7010
7011         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7012
7013         aconnector->edid = edid;
7014
7015         aconnector->dc_em_sink = dc_link_add_remote_sink(
7016                 aconnector->dc_link,
7017                 (uint8_t *)edid,
7018                 (edid->extensions + 1) * EDID_LENGTH,
7019                 &init_params);
7020
7021         if (aconnector->base.force == DRM_FORCE_ON) {
7022                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7023                 aconnector->dc_link->local_sink :
7024                 aconnector->dc_em_sink;
7025                 dc_sink_retain(aconnector->dc_sink);
7026         }
7027 }
7028
7029 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7030 {
7031         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7032
7033         /*
7034          * In case of a headless boot with a forced-on DP managed connector,
7035          * these settings have to be != 0 to get an initial modeset.
7036          */
7037         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7038                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7039                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7040         }
7041
7042
7043         aconnector->base.override_edid = true;
7044         create_eml_sink(aconnector);
7045 }
7046
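/*
 * Build a dc_stream_state for the sink and validate it against DC. If
 * validation fails, retry with progressively lower color depth (the
 * requested bpc stepped down by 2 until 6), and as a last resort on
 * DC_FAIL_ENC_VALIDATE retry once more with YCbCr 4:2:0 output forced.
 */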
7047 struct dc_stream_state *
7048 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7049                                 const struct drm_display_mode *drm_mode,
7050                                 const struct dm_connector_state *dm_state,
7051                                 const struct dc_stream_state *old_stream)
7052 {
7053         struct drm_connector *connector = &aconnector->base;
7054         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7055         struct dc_stream_state *stream;
7056         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7057         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7058         enum dc_status dc_result = DC_OK;
7059
7060         do {
7061                 stream = create_stream_for_sink(aconnector, drm_mode,
7062                                                 dm_state, old_stream,
7063                                                 requested_bpc);
7064                 if (stream == NULL) {
7065                         DRM_ERROR("Failed to create stream for sink!\n");
7066                         break;
7067                 }
7068
7069                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7070
7071                 if (dc_result != DC_OK) {
7072                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7073                                       drm_mode->hdisplay,
7074                                       drm_mode->vdisplay,
7075                                       drm_mode->clock,
7076                                       dc_result,
7077                                       dc_status_to_str(dc_result));
7078
7079                         dc_stream_release(stream);
7080                         stream = NULL;
7081                         requested_bpc -= 2; /* lower bpc to retry validation */
7082                 }
7083
7084         } while (stream == NULL && requested_bpc >= 6);
7085
7086         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7087                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7088
7089                 aconnector->force_yuv420_output = true;
7090                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7091                                                 dm_state, old_stream);
7092                 aconnector->force_yuv420_output = false;
7093         }
7094
7095         return stream;
7096 }
7097
7098 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7099                                    struct drm_display_mode *mode)
7100 {
7101         int result = MODE_ERROR;
7102         struct dc_sink *dc_sink;
7103         /* TODO: Unhardcode stream count */
7104         struct dc_stream_state *stream;
7105         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7106
7107         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7108                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7109                 return result;
7110
7111         /*
7112          * Only run this the first time mode_valid is called, to initialize
7113          * EDID management.
7114          */
7115         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7116                 !aconnector->dc_em_sink)
7117                 handle_edid_mgmt(aconnector);
7118
7119         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7120
7121         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7122                                 aconnector->base.force != DRM_FORCE_ON) {
7123                 DRM_ERROR("dc_sink is NULL!\n");
7124                 goto fail;
7125         }
7126
7127         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7128         if (stream) {
7129                 dc_stream_release(stream);
7130                 result = MODE_OK;
7131         }
7132
7133 fail:
7134         /* TODO: error handling */
7135         return result;
7136 }
7137
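/*
 * Translate the connector's HDR output metadata into a DC info packet:
 * the metadata is packed as an HDMI DRM (Dynamic Range and Mastering)
 * infoframe, then wrapped with either an HDMI infopacket header or a
 * DP SDP header depending on the connector type. The packed frame is
 * always 30 bytes: a 4 byte header plus 26 bytes of static metadata.
 */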
7138 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7139                                 struct dc_info_packet *out)
7140 {
7141         struct hdmi_drm_infoframe frame;
7142         unsigned char buf[30]; /* 26 + 4 */
7143         ssize_t len;
7144         int ret, i;
7145
7146         memset(out, 0, sizeof(*out));
7147
7148         if (!state->hdr_output_metadata)
7149                 return 0;
7150
7151         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7152         if (ret)
7153                 return ret;
7154
7155         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7156         if (len < 0)
7157                 return (int)len;
7158
7159         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7160         if (len != 30)
7161                 return -EINVAL;
7162
7163         /* Prepare the infopacket for DC. */
7164         switch (state->connector->connector_type) {
7165         case DRM_MODE_CONNECTOR_HDMIA:
7166                 out->hb0 = 0x87; /* type */
7167                 out->hb1 = 0x01; /* version */
7168                 out->hb2 = 0x1A; /* length */
7169                 out->sb[0] = buf[3]; /* checksum */
7170                 i = 1;
7171                 break;
7172
7173         case DRM_MODE_CONNECTOR_DisplayPort:
7174         case DRM_MODE_CONNECTOR_eDP:
7175                 out->hb0 = 0x00; /* sdp id, zero */
7176                 out->hb1 = 0x87; /* type */
7177                 out->hb2 = 0x1D; /* payload len - 1 */
7178                 out->hb3 = (0x13 << 2); /* sdp version */
7179                 out->sb[0] = 0x01; /* version */
7180                 out->sb[1] = 0x1A; /* length */
7181                 i = 2;
7182                 break;
7183
7184         default:
7185                 return -EINVAL;
7186         }
7187
7188         memcpy(&out->sb[i], &buf[4], 26);
7189         out->valid = true;
7190
7191         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7192                        sizeof(out->sb), false);
7193
7194         return 0;
7195 }
7196
7197 static int
7198 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7199                                  struct drm_atomic_state *state)
7200 {
7201         struct drm_connector_state *new_con_state =
7202                 drm_atomic_get_new_connector_state(state, conn);
7203         struct drm_connector_state *old_con_state =
7204                 drm_atomic_get_old_connector_state(state, conn);
7205         struct drm_crtc *crtc = new_con_state->crtc;
7206         struct drm_crtc_state *new_crtc_state;
7207         int ret;
7208
7209         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7210
7211         if (!crtc)
7212                 return 0;
7213
7214         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7215                 struct dc_info_packet hdr_infopacket;
7216
7217                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7218                 if (ret)
7219                         return ret;
7220
7221                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7222                 if (IS_ERR(new_crtc_state))
7223                         return PTR_ERR(new_crtc_state);
7224
7225                 /*
7226                  * DC considers the stream backends changed if the
7227                  * static metadata changes. Forcing the modeset also
7228                  * gives a simple way for userspace to switch from
7229                  * 8bpc to 10bpc when setting the metadata to enter
7230                  * or exit HDR.
7231                  *
7232                  * Changing the static metadata after it's been
7233                  * set is permissible, however. So only force a
7234                  * modeset if we're entering or exiting HDR.
7235                  */
7236                 new_crtc_state->mode_changed =
7237                         !old_con_state->hdr_output_metadata ||
7238                         !new_con_state->hdr_output_metadata;
7239         }
7240
7241         return 0;
7242 }
7243
7244 static const struct drm_connector_helper_funcs
7245 amdgpu_dm_connector_helper_funcs = {
7246         /*
7247          * If a second, larger display is hotplugged in fbcon mode, its higher
7248          * resolution modes will be filtered out by drm_mode_validate_size() and
7249          * will be missing once the user starts lightdm. So we need to refresh the
7250          * mode list in the get_modes callback, not just return the mode count.
7251          */
7252         .get_modes = get_modes,
7253         .mode_valid = amdgpu_dm_connector_mode_valid,
7254         .atomic_check = amdgpu_dm_connector_atomic_check,
7255 };
7256
7257 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7258 {
7259 }
7260
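/*
 * Count the non-cursor planes that will be enabled on the CRTC after
 * this commit; a plane counts as enabled when it has a framebuffer
 * attached.
 */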
7261 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7262 {
7263         struct drm_atomic_state *state = new_crtc_state->state;
7264         struct drm_plane *plane;
7265         int num_active = 0;
7266
7267         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7268                 struct drm_plane_state *new_plane_state;
7269
7270                 /* Cursor planes are "fake". */
7271                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7272                         continue;
7273
7274                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7275
7276                 if (!new_plane_state) {
7277                         /*
7278                          * The plane is enabled on the CRTC and hasn't changed
7279                          * state. This means that it previously passed
7280                          * validation and is therefore enabled.
7281                          */
7282                         num_active += 1;
7283                         continue;
7284                 }
7285
7286                 /* We need a framebuffer to be considered enabled. */
7287                 num_active += (new_plane_state->fb != NULL);
7288         }
7289
7290         return num_active;
7291 }
7292
7293 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7294                                          struct drm_crtc_state *new_crtc_state)
7295 {
7296         struct dm_crtc_state *dm_new_crtc_state =
7297                 to_dm_crtc_state(new_crtc_state);
7298
7299         dm_new_crtc_state->active_planes = 0;
7300
7301         if (!dm_new_crtc_state->stream)
7302                 return;
7303
7304         dm_new_crtc_state->active_planes =
7305                 count_crtc_active_planes(new_crtc_state);
7306 }
7307
7308 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7309                                        struct drm_atomic_state *state)
7310 {
7311         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7312                                                                           crtc);
7313         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7314         struct dc *dc = adev->dm.dc;
7315         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7316         int ret = -EINVAL;
7317
7318         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7319
7320         dm_update_crtc_active_planes(crtc, crtc_state);
7321
7322         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7323                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7324                 return ret;
7325         }
7326
7327         /*
7328          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7329          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7330          * planes are disabled, which is not supported by the hardware. And there is legacy
7331          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7332          */
7333         if (crtc_state->enable &&
7334             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7335                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7336                 return -EINVAL;
7337         }
7338
7339         /* In some use cases, like reset, no stream is attached */
7340         if (!dm_crtc_state->stream)
7341                 return 0;
7342
7343         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7344                 return 0;
7345
7346         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7347         return ret;
7348 }
7349
7350 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7351                                       const struct drm_display_mode *mode,
7352                                       struct drm_display_mode *adjusted_mode)
7353 {
7354         return true;
7355 }
7356
7357 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7358         .disable = dm_crtc_helper_disable,
7359         .atomic_check = dm_crtc_helper_atomic_check,
7360         .mode_fixup = dm_crtc_helper_mode_fixup,
7361         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7362 };
7363
7364 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7365 {
7366
7367 }
7368
7369 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7370 {
7371         switch (display_color_depth) {
7372         case COLOR_DEPTH_666:
7373                 return 6;
7374         case COLOR_DEPTH_888:
7375                 return 8;
7376         case COLOR_DEPTH_101010:
7377                 return 10;
7378         case COLOR_DEPTH_121212:
7379                 return 12;
7380         case COLOR_DEPTH_141414:
7381                 return 14;
7382         case COLOR_DEPTH_161616:
7383                 return 16;
7384         default:
7385                 break;
7386         }
7387         return 0;
7388 }
7389
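/*
 * For MST connectors, compute the payload bandwidth number (PBN) from
 * the adjusted mode's pixel clock and effective bits per pixel
 * (bpc * 3), then reserve the corresponding number of VCPI time slots
 * in the MST atomic state.
 */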
7390 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7391                                           struct drm_crtc_state *crtc_state,
7392                                           struct drm_connector_state *conn_state)
7393 {
7394         struct drm_atomic_state *state = crtc_state->state;
7395         struct drm_connector *connector = conn_state->connector;
7396         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7397         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7398         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7399         struct drm_dp_mst_topology_mgr *mst_mgr;
7400         struct drm_dp_mst_port *mst_port;
7401         enum dc_color_depth color_depth;
7402         int clock, bpp = 0;
7403         bool is_y420 = false;
7404
7405         if (!aconnector->port || !aconnector->dc_sink)
7406                 return 0;
7407
7408         mst_port = aconnector->port;
7409         mst_mgr = &aconnector->mst_port->mst_mgr;
7410
7411         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7412                 return 0;
7413
7414         if (!state->duplicated) {
7415                 int max_bpc = conn_state->max_requested_bpc;
7416                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7417                                 aconnector->force_yuv420_output;
7418                 color_depth = convert_color_depth_from_display_info(connector,
7419                                                                     is_y420,
7420                                                                     max_bpc);
7421                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7422                 clock = adjusted_mode->clock;
7423                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7424         }
7425         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7426                                                                            mst_mgr,
7427                                                                            mst_port,
7428                                                                            dm_new_connector_state->pbn,
7429                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7430         if (dm_new_connector_state->vcpi_slots < 0) {
7431                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7432                 return dm_new_connector_state->vcpi_slots;
7433         }
7434         return 0;
7435 }
7436
7437 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7438         .disable = dm_encoder_helper_disable,
7439         .atomic_check = dm_encoder_helper_atomic_check
7440 };
7441
7442 #if defined(CONFIG_DRM_AMD_DC_DCN)
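/*
 * Re-derive PBN and VCPI slot counts for every MST stream after DSC
 * has been (re)computed: the PBN comes from the DSC fairness vars,
 * the slot count is DIV_ROUND_UP(pbn, pbn_div), and DSC is enabled or
 * disabled on each port accordingly.
 */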
7443 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7444                                             struct dc_state *dc_state,
7445                                             struct dsc_mst_fairness_vars *vars)
7446 {
7447         struct dc_stream_state *stream = NULL;
7448         struct drm_connector *connector;
7449         struct drm_connector_state *new_con_state;
7450         struct amdgpu_dm_connector *aconnector;
7451         struct dm_connector_state *dm_conn_state;
7452         int i, j;
7453         int vcpi, pbn_div, pbn, slot_num = 0;
7454
7455         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7456
7457                 aconnector = to_amdgpu_dm_connector(connector);
7458
7459                 if (!aconnector->port)
7460                         continue;
7461
7462                 if (!new_con_state || !new_con_state->crtc)
7463                         continue;
7464
7465                 dm_conn_state = to_dm_connector_state(new_con_state);
7466
7467                 for (j = 0; j < dc_state->stream_count; j++) {
7468                         stream = dc_state->streams[j];
7469                         if (!stream)
7470                                 continue;
7471
7472                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7473                                 break;
7474
7475                         stream = NULL;
7476                 }
7477
7478                 if (!stream)
7479                         continue;
7480
7481                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7482                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7483                 for (j = 0; j < dc_state->stream_count; j++) {
7484                         if (vars[j].aconnector == aconnector) {
7485                                 pbn = vars[j].pbn;
7486                                 break;
7487                         }
7488                 }
7489
7490                 if (j == dc_state->stream_count)
7491                         continue;
7492
7493                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7494
7495                 if (stream->timing.flags.DSC != 1) {
7496                         dm_conn_state->pbn = pbn;
7497                         dm_conn_state->vcpi_slots = slot_num;
7498
7499                         drm_dp_mst_atomic_enable_dsc(state,
7500                                                      aconnector->port,
7501                                                      dm_conn_state->pbn,
7502                                                      0,
7503                                                      false);
7504                         continue;
7505                 }
7506
7507                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7508                                                     aconnector->port,
7509                                                     pbn, pbn_div,
7510                                                     true);
7511                 if (vcpi < 0)
7512                         return vcpi;
7513
7514                 dm_conn_state->pbn = pbn;
7515                 dm_conn_state->vcpi_slots = vcpi;
7516         }
7517         return 0;
7518 }
7519 #endif
7520
7521 static void dm_drm_plane_reset(struct drm_plane *plane)
7522 {
7523         struct dm_plane_state *amdgpu_state = NULL;
7524
7525         if (plane->state)
7526                 plane->funcs->atomic_destroy_state(plane, plane->state);
7527
7528         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7529         WARN_ON(amdgpu_state == NULL);
7530
7531         if (amdgpu_state)
7532                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7533 }
7534
7535 static struct drm_plane_state *
7536 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7537 {
7538         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7539
7540         old_dm_plane_state = to_dm_plane_state(plane->state);
7541         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7542         if (!dm_plane_state)
7543                 return NULL;
7544
7545         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7546
7547         if (old_dm_plane_state->dc_state) {
7548                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7549                 dc_plane_state_retain(dm_plane_state->dc_state);
7550         }
7551
7552         return &dm_plane_state->base;
7553 }
7554
7555 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7556                                 struct drm_plane_state *state)
7557 {
7558         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7559
7560         if (dm_plane_state->dc_state)
7561                 dc_plane_state_release(dm_plane_state->dc_state);
7562
7563         drm_atomic_helper_plane_destroy_state(plane, state);
7564 }
7565
7566 static const struct drm_plane_funcs dm_plane_funcs = {
7567         .update_plane   = drm_atomic_helper_update_plane,
7568         .disable_plane  = drm_atomic_helper_disable_plane,
7569         .destroy        = drm_primary_helper_destroy,
7570         .reset = dm_drm_plane_reset,
7571         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7572         .atomic_destroy_state = dm_drm_plane_destroy_state,
7573         .format_mod_supported = dm_plane_format_mod_supported,
7574 };
7575
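/*
 * Prepare the framebuffer for scanout: reserve the BO, pin it into a
 * scanout-capable domain (VRAM for cursors, any supported domain
 * otherwise), map it into GART, and record the resulting GPU address
 * in the amdgpu_framebuffer for later programming.
 */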
7576 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7577                                       struct drm_plane_state *new_state)
7578 {
7579         struct amdgpu_framebuffer *afb;
7580         struct drm_gem_object *obj;
7581         struct amdgpu_device *adev;
7582         struct amdgpu_bo *rbo;
7583         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7584         struct list_head list;
7585         struct ttm_validate_buffer tv;
7586         struct ww_acquire_ctx ticket;
7587         uint32_t domain;
7588         int r;
7589
7590         if (!new_state->fb) {
7591                 DRM_DEBUG_KMS("No FB bound\n");
7592                 return 0;
7593         }
7594
7595         afb = to_amdgpu_framebuffer(new_state->fb);
7596         obj = new_state->fb->obj[0];
7597         rbo = gem_to_amdgpu_bo(obj);
7598         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7599         INIT_LIST_HEAD(&list);
7600
7601         tv.bo = &rbo->tbo;
7602         tv.num_shared = 1;
7603         list_add(&tv.head, &list);
7604
7605         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7606         if (r) {
7607                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7608                 return r;
7609         }
7610
7611         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7612                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7613         else
7614                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7615
7616         r = amdgpu_bo_pin(rbo, domain);
7617         if (unlikely(r != 0)) {
7618                 if (r != -ERESTARTSYS)
7619                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7620                 ttm_eu_backoff_reservation(&ticket, &list);
7621                 return r;
7622         }
7623
7624         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7625         if (unlikely(r != 0)) {
7626                 amdgpu_bo_unpin(rbo);
7627                 ttm_eu_backoff_reservation(&ticket, &list);
7628                 DRM_ERROR("%p bind failed\n", rbo);
7629                 return r;
7630         }
7631
7632         ttm_eu_backoff_reservation(&ticket, &list);
7633
7634         afb->address = amdgpu_bo_gpu_offset(rbo);
7635
7636         amdgpu_bo_ref(rbo);
7637
7638         /*
7639          * We don't do surface updates on planes that have been newly created,
7640          * but we also don't have the afb->address during atomic check.
7641          *
7642          * Fill in buffer attributes depending on the address here, but only on
7643          * newly created planes since they're not being used by DC yet and this
7644          * won't modify global state.
7645          */
7646         dm_plane_state_old = to_dm_plane_state(plane->state);
7647         dm_plane_state_new = to_dm_plane_state(new_state);
7648
7649         if (dm_plane_state_new->dc_state &&
7650             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7651                 struct dc_plane_state *plane_state =
7652                         dm_plane_state_new->dc_state;
7653                 bool force_disable_dcc = !plane_state->dcc.enable;
7654
7655                 fill_plane_buffer_attributes(
7656                         adev, afb, plane_state->format, plane_state->rotation,
7657                         afb->tiling_flags,
7658                         &plane_state->tiling_info, &plane_state->plane_size,
7659                         &plane_state->dcc, &plane_state->address,
7660                         afb->tmz_surface, force_disable_dcc);
7661         }
7662
7663         return 0;
7664 }
7665
7666 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7667                                        struct drm_plane_state *old_state)
7668 {
7669         struct amdgpu_bo *rbo;
7670         int r;
7671
7672         if (!old_state->fb)
7673                 return;
7674
7675         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7676         r = amdgpu_bo_reserve(rbo, false);
7677         if (unlikely(r)) {
7678                 DRM_ERROR("failed to reserve rbo before unpin\n");
7679                 return;
7680         }
7681
7682         amdgpu_bo_unpin(rbo);
7683         amdgpu_bo_unreserve(rbo);
7684         amdgpu_bo_unref(&rbo);
7685 }
7686
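/*
 * Validate the plane's viewport against the CRTC and translate DC's
 * scaling caps (fixed point where 1000 == 1.0, expressed as dst/src)
 * into DRM's 16.16 src/dst convention. For example, a max_upscale cap
 * of 16000 (16x) becomes min_scale = (1000 << 16) / 16000 = 4096,
 * i.e. 1/16 in 16.16 fixed point.
 */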
7687 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7688                                        struct drm_crtc_state *new_crtc_state)
7689 {
7690         struct drm_framebuffer *fb = state->fb;
7691         int min_downscale, max_upscale;
7692         int min_scale = 0;
7693         int max_scale = INT_MAX;
7694
7695         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7696         if (fb && state->crtc) {
7697                 /* Validate viewport to cover the case when only the position changes */
7698                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7699                         int viewport_width = state->crtc_w;
7700                         int viewport_height = state->crtc_h;
7701
7702                         if (state->crtc_x < 0)
7703                                 viewport_width += state->crtc_x;
7704                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7705                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7706
7707                         if (state->crtc_y < 0)
7708                                 viewport_height += state->crtc_y;
7709                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7710                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7711
7712                         if (viewport_width < 0 || viewport_height < 0) {
7713                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7714                                 return -EINVAL;
7715                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* width is doubled because of pipe-split. */
7716                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7717                                 return -EINVAL;
7718                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7719                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7720                                 return -EINVAL;
7721                         }
7722
7723                 }
7724
7725                 /* Get min/max allowed scaling factors from plane caps. */
7726                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7727                                              &min_downscale, &max_upscale);
7728                 /*
7729                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7730                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7731                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7732                  */
7733                 min_scale = (1000 << 16) / max_upscale;
7734                 max_scale = (1000 << 16) / min_downscale;
7735         }
7736
7737         return drm_atomic_helper_check_plane_state(
7738                 state, new_crtc_state, min_scale, max_scale, true, true);
7739 }
7740
7741 static int dm_plane_atomic_check(struct drm_plane *plane,
7742                                  struct drm_atomic_state *state)
7743 {
7744         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7745                                                                                  plane);
7746         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7747         struct dc *dc = adev->dm.dc;
7748         struct dm_plane_state *dm_plane_state;
7749         struct dc_scaling_info scaling_info;
7750         struct drm_crtc_state *new_crtc_state;
7751         int ret;
7752
7753         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7754
7755         dm_plane_state = to_dm_plane_state(new_plane_state);
7756
7757         if (!dm_plane_state->dc_state)
7758                 return 0;
7759
7760         new_crtc_state =
7761                 drm_atomic_get_new_crtc_state(state,
7762                                               new_plane_state->crtc);
7763         if (!new_crtc_state)
7764                 return -EINVAL;
7765
7766         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7767         if (ret)
7768                 return ret;
7769
7770         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7771         if (ret)
7772                 return ret;
7773
7774         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7775                 return 0;
7776
7777         return -EINVAL;
7778 }
7779
7780 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7781                                        struct drm_atomic_state *state)
7782 {
7783         /* Only support async updates on cursor planes. */
7784         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7785                 return -EINVAL;
7786
7787         return 0;
7788 }
7789
7790 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7791                                          struct drm_atomic_state *state)
7792 {
7793         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7794                                                                            plane);
7795         struct drm_plane_state *old_state =
7796                 drm_atomic_get_old_plane_state(state, plane);
7797
7798         trace_amdgpu_dm_atomic_update_cursor(new_state);
7799
7800         swap(plane->state->fb, new_state->fb);
7801
7802         plane->state->src_x = new_state->src_x;
7803         plane->state->src_y = new_state->src_y;
7804         plane->state->src_w = new_state->src_w;
7805         plane->state->src_h = new_state->src_h;
7806         plane->state->crtc_x = new_state->crtc_x;
7807         plane->state->crtc_y = new_state->crtc_y;
7808         plane->state->crtc_w = new_state->crtc_w;
7809         plane->state->crtc_h = new_state->crtc_h;
7810
7811         handle_cursor_update(plane, old_state);
7812 }
7813
7814 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7815         .prepare_fb = dm_plane_helper_prepare_fb,
7816         .cleanup_fb = dm_plane_helper_cleanup_fb,
7817         .atomic_check = dm_plane_atomic_check,
7818         .atomic_async_check = dm_plane_atomic_async_check,
7819         .atomic_async_update = dm_plane_atomic_async_update
7820 };
7821
7822 /*
7823  * TODO: these are currently initialized to rgb formats only.
7824  * For future use cases we should either initialize them dynamically based on
7825  * plane capabilities, or initialize this array to all formats, so the internal
7826  * drm check will succeed, and let DC implement the proper check
7827  */
7828 static const uint32_t rgb_formats[] = {
7829         DRM_FORMAT_XRGB8888,
7830         DRM_FORMAT_ARGB8888,
7831         DRM_FORMAT_RGBA8888,
7832         DRM_FORMAT_XRGB2101010,
7833         DRM_FORMAT_XBGR2101010,
7834         DRM_FORMAT_ARGB2101010,
7835         DRM_FORMAT_ABGR2101010,
7836         DRM_FORMAT_XRGB16161616,
7837         DRM_FORMAT_XBGR16161616,
7838         DRM_FORMAT_ARGB16161616,
7839         DRM_FORMAT_ABGR16161616,
7840         DRM_FORMAT_XBGR8888,
7841         DRM_FORMAT_ABGR8888,
7842         DRM_FORMAT_RGB565,
7843 };
7844
7845 static const uint32_t overlay_formats[] = {
7846         DRM_FORMAT_XRGB8888,
7847         DRM_FORMAT_ARGB8888,
7848         DRM_FORMAT_RGBA8888,
7849         DRM_FORMAT_XBGR8888,
7850         DRM_FORMAT_ABGR8888,
7851         DRM_FORMAT_RGB565
7852 };
7853
7854 static const u32 cursor_formats[] = {
7855         DRM_FORMAT_ARGB8888
7856 };
7857
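/*
 * Fill the formats array with the pixel formats supported by the given
 * plane type, extending the primary plane's RGB set with NV12, P010
 * and FP16 entries when the DC plane caps advertise them. Returns the
 * number of formats written, capped at max_formats.
 */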
7858 static int get_plane_formats(const struct drm_plane *plane,
7859                              const struct dc_plane_cap *plane_cap,
7860                              uint32_t *formats, int max_formats)
7861 {
7862         int i, num_formats = 0;
7863
7864         /*
7865          * TODO: Query support for each group of formats directly from
7866          * DC plane caps. This will require adding more formats to the
7867          * caps list.
7868          */
7869
7870         switch (plane->type) {
7871         case DRM_PLANE_TYPE_PRIMARY:
7872                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7873                         if (num_formats >= max_formats)
7874                                 break;
7875
7876                         formats[num_formats++] = rgb_formats[i];
7877                 }
7878
7879                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7880                         formats[num_formats++] = DRM_FORMAT_NV12;
7881                 if (plane_cap && plane_cap->pixel_format_support.p010)
7882                         formats[num_formats++] = DRM_FORMAT_P010;
7883                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7884                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7885                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7886                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7887                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7888                 }
7889                 break;
7890
7891         case DRM_PLANE_TYPE_OVERLAY:
7892                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7893                         if (num_formats >= max_formats)
7894                                 break;
7895
7896                         formats[num_formats++] = overlay_formats[i];
7897                 }
7898                 break;
7899
7900         case DRM_PLANE_TYPE_CURSOR:
7901                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7902                         if (num_formats >= max_formats)
7903                                 break;
7904
7905                         formats[num_formats++] = cursor_formats[i];
7906                 }
7907                 break;
7908         }
7909
7910         return num_formats;
7911 }
7912
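/*
 * Initialize a DRM plane from the DC plane caps: query the supported
 * formats and modifiers, register the plane with DRM, and create the
 * alpha, blend mode, color encoding/range and rotation properties the
 * hardware supports.
 */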
7913 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7914                                 struct drm_plane *plane,
7915                                 unsigned long possible_crtcs,
7916                                 const struct dc_plane_cap *plane_cap)
7917 {
7918         uint32_t formats[32];
7919         int num_formats;
7920         int res = -EPERM;
7921         unsigned int supported_rotations;
7922         uint64_t *modifiers = NULL;
7923
7924         num_formats = get_plane_formats(plane, plane_cap, formats,
7925                                         ARRAY_SIZE(formats));
7926
7927         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7928         if (res)
7929                 return res;
7930
7931         if (modifiers == NULL)
7932                 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7933
7934         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7935                                        &dm_plane_funcs, formats, num_formats,
7936                                        modifiers, plane->type, NULL);
7937         kfree(modifiers);
7938         if (res)
7939                 return res;
7940
7941         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7942             plane_cap && plane_cap->per_pixel_alpha) {
7943                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7944                                           BIT(DRM_MODE_BLEND_PREMULTI);
7945
7946                 drm_plane_create_alpha_property(plane);
7947                 drm_plane_create_blend_mode_property(plane, blend_caps);
7948         }
7949
7950         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7951             plane_cap &&
7952             (plane_cap->pixel_format_support.nv12 ||
7953              plane_cap->pixel_format_support.p010)) {
7954                 /* This only affects YUV formats. */
7955                 drm_plane_create_color_properties(
7956                         plane,
7957                         BIT(DRM_COLOR_YCBCR_BT601) |
7958                         BIT(DRM_COLOR_YCBCR_BT709) |
7959                         BIT(DRM_COLOR_YCBCR_BT2020),
7960                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7961                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7962                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7963         }
7964
7965         supported_rotations =
7966                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7967                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7968
7969         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7970             plane->type != DRM_PLANE_TYPE_CURSOR)
7971                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7972                                                    supported_rotations);
7973
7974         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7975
7976         /* Create (reset) the plane state */
7977         if (plane->funcs->reset)
7978                 plane->funcs->reset(plane);
7979
7980         return 0;
7981 }
7982
7983 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7984                                struct drm_plane *plane,
7985                                uint32_t crtc_index)
7986 {
7987         struct amdgpu_crtc *acrtc = NULL;
7988         struct drm_plane *cursor_plane;
7989
7990         int res = -ENOMEM;
7991
7992         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7993         if (!cursor_plane)
7994                 goto fail;
7995
7996         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7997         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
7998
7999         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8000         if (!acrtc)
8001                 goto fail;
8002
8003         res = drm_crtc_init_with_planes(
8004                         dm->ddev,
8005                         &acrtc->base,
8006                         plane,
8007                         cursor_plane,
8008                         &amdgpu_dm_crtc_funcs, NULL);
8009
8010         if (res)
8011                 goto fail;
8012
8013         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8014
8015         /* Create (reset) the plane state */
8016         if (acrtc->base.funcs->reset)
8017                 acrtc->base.funcs->reset(&acrtc->base);
8018
8019         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8020         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8021
8022         acrtc->crtc_id = crtc_index;
8023         acrtc->base.enabled = false;
8024         acrtc->otg_inst = -1;
8025
8026         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
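             /*
              * Expose the non-legacy color pipeline (a degamma LUT, a CTM and
              * a gamma LUT, sized below) in addition to the legacy gamma ramp.
              */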
8027         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8028                                    true, MAX_COLOR_LUT_ENTRIES);
8029         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8030
8031         return 0;
8032
8033 fail:
8034         kfree(acrtc);
8035         kfree(cursor_plane);
8036         return res;
8037 }
8038
8039
8040 static int to_drm_connector_type(enum signal_type st)
8041 {
8042         switch (st) {
8043         case SIGNAL_TYPE_HDMI_TYPE_A:
8044                 return DRM_MODE_CONNECTOR_HDMIA;
8045         case SIGNAL_TYPE_EDP:
8046                 return DRM_MODE_CONNECTOR_eDP;
8047         case SIGNAL_TYPE_LVDS:
8048                 return DRM_MODE_CONNECTOR_LVDS;
8049         case SIGNAL_TYPE_RGB:
8050                 return DRM_MODE_CONNECTOR_VGA;
8051         case SIGNAL_TYPE_DISPLAY_PORT:
8052         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8053                 return DRM_MODE_CONNECTOR_DisplayPort;
8054         case SIGNAL_TYPE_DVI_DUAL_LINK:
8055         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8056                 return DRM_MODE_CONNECTOR_DVID;
8057         case SIGNAL_TYPE_VIRTUAL:
8058                 return DRM_MODE_CONNECTOR_VIRTUAL;
8059
8060         default:
8061                 return DRM_MODE_CONNECTOR_Unknown;
8062         }
8063 }
8064
8065 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8066 {
8067         struct drm_encoder *encoder;
8068
8069         /* There is only one encoder per connector */
8070         drm_connector_for_each_possible_encoder(connector, encoder)
8071                 return encoder;
8072
8073         return NULL;
8074 }
8075
8076 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8077 {
8078         struct drm_encoder *encoder;
8079         struct amdgpu_encoder *amdgpu_encoder;
8080
8081         encoder = amdgpu_dm_connector_to_encoder(connector);
8082
8083         if (encoder == NULL)
8084                 return;
8085
8086         amdgpu_encoder = to_amdgpu_encoder(encoder);
8087
8088         amdgpu_encoder->native_mode.clock = 0;
8089
8090         if (!list_empty(&connector->probed_modes)) {
8091                 struct drm_display_mode *preferred_mode = NULL;
8092
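                     /*
                      * The probed list was sorted by the caller (see
                      * amdgpu_dm_connector_ddc_get_modes()), so its first
                      * entry is the best, preferred candidate; hence the
                      * unconditional break below.
                      */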
8093                 list_for_each_entry(preferred_mode,
8094                                     &connector->probed_modes,
8095                                     head) {
8096                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8097                                 amdgpu_encoder->native_mode = *preferred_mode;
8098
8099                         break;
8100                 }
8101
8102         }
8103 }
8104
8105 static struct drm_display_mode *
8106 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8107                              char *name,
8108                              int hdisplay, int vdisplay)
8109 {
8110         struct drm_device *dev = encoder->dev;
8111         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8112         struct drm_display_mode *mode = NULL;
8113         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8114
8115         mode = drm_mode_duplicate(dev, native_mode);
8116
8117         if (mode == NULL)
8118                 return NULL;
8119
8120         mode->hdisplay = hdisplay;
8121         mode->vdisplay = vdisplay;
8122         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8123         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8124
8125         return mode;
8126
8127 }
8128
8129 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8130                                                  struct drm_connector *connector)
8131 {
8132         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8133         struct drm_display_mode *mode = NULL;
8134         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8135         struct amdgpu_dm_connector *amdgpu_dm_connector =
8136                                 to_amdgpu_dm_connector(connector);
8137         int i;
8138         int n;
8139         struct mode_size {
8140                 char name[DRM_DISPLAY_MODE_LEN];
8141                 int w;
8142                 int h;
8143         } common_modes[] = {
8144                 {  "640x480",  640,  480},
8145                 {  "800x600",  800,  600},
8146                 { "1024x768", 1024,  768},
8147                 { "1280x720", 1280,  720},
8148                 { "1280x800", 1280,  800},
8149                 {"1280x1024", 1280, 1024},
8150                 { "1440x900", 1440,  900},
8151                 {"1680x1050", 1680, 1050},
8152                 {"1600x1200", 1600, 1200},
8153                 {"1920x1080", 1920, 1080},
8154                 {"1920x1200", 1920, 1200}
8155         };
8156
8157         n = ARRAY_SIZE(common_modes);
8158
8159         for (i = 0; i < n; i++) {
8160                 struct drm_display_mode *curmode = NULL;
8161                 bool mode_existed = false;
8162
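                     /*
                      * Skip modes larger than the native mode in either
                      * dimension, and the native size itself: the native
                      * mode is already in the probed list.
                      */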
8163                 if (common_modes[i].w > native_mode->hdisplay ||
8164                     common_modes[i].h > native_mode->vdisplay ||
8165                    (common_modes[i].w == native_mode->hdisplay &&
8166                     common_modes[i].h == native_mode->vdisplay))
8167                         continue;
8168
8169                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8170                         if (common_modes[i].w == curmode->hdisplay &&
8171                             common_modes[i].h == curmode->vdisplay) {
8172                                 mode_existed = true;
8173                                 break;
8174                         }
8175                 }
8176
8177                 if (mode_existed)
8178                         continue;
8179
8180                 mode = amdgpu_dm_create_common_mode(encoder,
8181                                 common_modes[i].name, common_modes[i].w,
8182                                 common_modes[i].h);
8183                 if (!mode)
8184                         continue;
8185
8186                 drm_mode_probed_add(connector, mode);
8187                 amdgpu_dm_connector->num_modes++;
8188         }
8189 }
8190
8191 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8192 {
8193         struct drm_encoder *encoder;
8194         struct amdgpu_encoder *amdgpu_encoder;
8195         const struct drm_display_mode *native_mode;
8196
8197         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8198             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8199                 return;
8200
8201         encoder = amdgpu_dm_connector_to_encoder(connector);
8202         if (!encoder)
8203                 return;
8204
8205         amdgpu_encoder = to_amdgpu_encoder(encoder);
8206
8207         native_mode = &amdgpu_encoder->native_mode;
8208         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8209                 return;
8210
8211         drm_connector_set_panel_orientation_with_quirk(connector,
8212                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8213                                                        native_mode->hdisplay,
8214                                                        native_mode->vdisplay);
8215 }
8216
8217 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8218                                               struct edid *edid)
8219 {
8220         struct amdgpu_dm_connector *amdgpu_dm_connector =
8221                         to_amdgpu_dm_connector(connector);
8222
8223         if (edid) {
8224                 /* empty probed_modes */
8225                 INIT_LIST_HEAD(&connector->probed_modes);
8226                 amdgpu_dm_connector->num_modes =
8227                                 drm_add_edid_modes(connector, edid);
8228
8229                 /* Sort the probed modes before calling
8230                  * amdgpu_dm_get_native_mode(), since an EDID can have
8231                  * more than one preferred mode. Modes later in the
8232                  * probed list may have a higher, preferred resolution:
8233                  * for example, a 3840x2160 preferred timing in the base
8234                  * EDID and a 4096x2160 preferred resolution in a DID
8235                  * extension block later on.
8236                  */
8237                 drm_mode_sort(&connector->probed_modes);
8238                 amdgpu_dm_get_native_mode(connector);
8239
8240                 /* Freesync capabilities are reset by calling
8241                  * drm_add_edid_modes() and need to be
8242                  * restored here.
8243                  */
8244                 amdgpu_dm_update_freesync_caps(connector, edid);
8245
8246                 amdgpu_set_panel_orientation(connector);
8247         } else {
8248                 amdgpu_dm_connector->num_modes = 0;
8249         }
8250 }
8251
8252 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8253                               struct drm_display_mode *mode)
8254 {
8255         struct drm_display_mode *m;
8256
8257         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8258                 if (drm_mode_equal(m, mode))
8259                         return true;
8260         }
8261
8262         return false;
8263 }
8264
8265 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8266 {
8267         const struct drm_display_mode *m;
8268         struct drm_display_mode *new_mode;
8269         uint i;
8270         uint32_t new_modes_count = 0;
8271
8272         /* Standard FPS values
8273          *
8274          * 23.976       - TV/NTSC
8275          * 24           - Cinema
8276          * 25           - TV/PAL
8277          * 29.97        - TV/NTSC
8278          * 30           - TV/NTSC
8279          * 48           - Cinema HFR
8280          * 50           - TV/PAL
8281          * 60           - Commonly used
8282          * 48,72,96,120 - Multiples of 24
8283          */
8284         static const uint32_t common_rates[] = {
8285                 23976, 24000, 25000, 29970, 30000,
8286                 48000, 50000, 60000, 72000, 96000, 120000
8287         };
8288
8289         /*
8290          * Find mode with highest refresh rate with the same resolution
8291          * as the preferred mode. Some monitors report a preferred mode
8292          * with lower resolution than the highest refresh rate supported.
8293          */
8294
8295         m = get_highest_refresh_rate_mode(aconnector, true);
8296         if (!m)
8297                 return 0;
8298
8299         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8300                 uint64_t target_vtotal, target_vtotal_diff;
8301                 uint64_t num, den;
8302
8303                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8304                         continue;
8305
8306                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8307                     common_rates[i] > aconnector->max_vfreq * 1000)
8308                         continue;
8309
8310                 num = (unsigned long long)m->clock * 1000 * 1000;
8311                 den = common_rates[i] * (unsigned long long)m->htotal;
8312                 target_vtotal = div_u64(num, den);
8313                 target_vtotal_diff = target_vtotal - m->vtotal;
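                     /*
                      * Worked example: a CEA 1080p60 mode has clock = 148500
                      * (kHz), htotal = 2200 and vtotal = 1125. Retiming it to
                      * 50 Hz (common_rates[i] == 50000) gives target_vtotal =
                      * 148500 * 10^6 / (50000 * 2200) = 1350, i.e. a
                      * target_vtotal_diff of 225 extra lines of front porch.
                      */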
8314
8315                 /* Check for illegal modes */
8316                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8317                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8318                     m->vtotal + target_vtotal_diff < m->vsync_end)
8319                         continue;
8320
8321                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8322                 if (!new_mode)
8323                         goto out;
8324
8325                 new_mode->vtotal += (u16)target_vtotal_diff;
8326                 new_mode->vsync_start += (u16)target_vtotal_diff;
8327                 new_mode->vsync_end += (u16)target_vtotal_diff;
8328                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8329                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8330
8331                 if (!is_duplicate_mode(aconnector, new_mode)) {
8332                         drm_mode_probed_add(&aconnector->base, new_mode);
8333                         new_modes_count += 1;
8334                 } else
8335                         drm_mode_destroy(aconnector->base.dev, new_mode);
8336         }
8337 out:
8338         return new_modes_count;
8339 }
8340
8341 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8342                                                    struct edid *edid)
8343 {
8344         struct amdgpu_dm_connector *amdgpu_dm_connector =
8345                 to_amdgpu_dm_connector(connector);
8346
8347         if (!edid)
8348                 return;
8349
8350         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8351                 amdgpu_dm_connector->num_modes +=
8352                         add_fs_modes(amdgpu_dm_connector);
8353 }
8354
8355 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8356 {
8357         struct amdgpu_dm_connector *amdgpu_dm_connector =
8358                         to_amdgpu_dm_connector(connector);
8359         struct drm_encoder *encoder;
8360         struct edid *edid = amdgpu_dm_connector->edid;
8361
8362         encoder = amdgpu_dm_connector_to_encoder(connector);
8363
8364         if (!drm_edid_is_valid(edid)) {
8365                 amdgpu_dm_connector->num_modes =
8366                                 drm_add_modes_noedid(connector, 640, 480);
8367         } else {
8368                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8369                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8370                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8371         }
8372         amdgpu_dm_fbc_init(connector);
8373
8374         return amdgpu_dm_connector->num_modes;
8375 }
8376
8377 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8378                                      struct amdgpu_dm_connector *aconnector,
8379                                      int connector_type,
8380                                      struct dc_link *link,
8381                                      int link_index)
8382 {
8383         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8384
8385         /*
8386          * Some of the properties below require access to state, like bpc.
8387          * Allocate some default initial connector state with our reset helper.
8388          */
8389         if (aconnector->base.funcs->reset)
8390                 aconnector->base.funcs->reset(&aconnector->base);
8391
8392         aconnector->connector_id = link_index;
8393         aconnector->dc_link = link;
8394         aconnector->base.interlace_allowed = false;
8395         aconnector->base.doublescan_allowed = false;
8396         aconnector->base.stereo_allowed = false;
8397         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8398         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8399         aconnector->audio_inst = -1;
8400         mutex_init(&aconnector->hpd_lock);
8401
8402         /*
8403          * Configure HPD hot-plug support: connector->polled defaults to 0,
8404          * which means HPD hot plug is not supported.
8405          */
8406         switch (connector_type) {
8407         case DRM_MODE_CONNECTOR_HDMIA:
8408                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8409                 aconnector->base.ycbcr_420_allowed =
8410                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8411                 break;
8412         case DRM_MODE_CONNECTOR_DisplayPort:
8413                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8414                 link->link_enc = link_enc_cfg_get_link_enc(link);
8415                 ASSERT(link->link_enc);
8416                 if (link->link_enc)
8417                         aconnector->base.ycbcr_420_allowed =
8418                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
8419                 break;
8420         case DRM_MODE_CONNECTOR_DVID:
8421                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8422                 break;
8423         default:
8424                 break;
8425         }
8426
8427         drm_object_attach_property(&aconnector->base.base,
8428                                 dm->ddev->mode_config.scaling_mode_property,
8429                                 DRM_MODE_SCALE_NONE);
8430
8431         drm_object_attach_property(&aconnector->base.base,
8432                                 adev->mode_info.underscan_property,
8433                                 UNDERSCAN_OFF);
8434         drm_object_attach_property(&aconnector->base.base,
8435                                 adev->mode_info.underscan_hborder_property,
8436                                 0);
8437         drm_object_attach_property(&aconnector->base.base,
8438                                 adev->mode_info.underscan_vborder_property,
8439                                 0);
8440
8441         if (!aconnector->mst_port)
8442                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8443
8444         /* The property defaults to the max of the range, but we want 8bpc for non-eDP. */
8445         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8446         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8447
8448         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8449             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8450                 drm_object_attach_property(&aconnector->base.base,
8451                                 adev->mode_info.abm_level_property, 0);
8452         }
8453
8454         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8455             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8456             connector_type == DRM_MODE_CONNECTOR_eDP) {
8457                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8458
8459                 if (!aconnector->mst_port)
8460                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8461
8462 #ifdef CONFIG_DRM_AMD_DC_HDCP
8463                 if (adev->dm.hdcp_workqueue)
8464                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8465 #endif
8466         }
8467 }
8468
8469 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8470                               struct i2c_msg *msgs, int num)
8471 {
8472         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8473         struct ddc_service *ddc_service = i2c->ddc_service;
8474         struct i2c_command cmd;
8475         int i;
8476         int result = -EIO;
8477
8478         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8479
8480         if (!cmd.payloads)
8481                 return result;
8482
8483         cmd.number_of_payloads = num;
8484         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8485         cmd.speed = 100;
8486
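             /*
              * Translate each Linux struct i2c_msg into a DC i2c_payload; the
              * direction comes from the I2C_M_RD flag, the rest maps 1:1.
              */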
8487         for (i = 0; i < num; i++) {
8488                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8489                 cmd.payloads[i].address = msgs[i].addr;
8490                 cmd.payloads[i].length = msgs[i].len;
8491                 cmd.payloads[i].data = msgs[i].buf;
8492         }
8493
8494         if (dc_submit_i2c(
8495                         ddc_service->ctx->dc,
8496                         ddc_service->ddc_pin->hw_info.ddc_channel,
8497                         &cmd))
8498                 result = num;
8499
8500         kfree(cmd.payloads);
8501         return result;
8502 }
8503
8504 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8505 {
8506         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8507 }
8508
8509 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8510         .master_xfer = amdgpu_dm_i2c_xfer,
8511         .functionality = amdgpu_dm_i2c_func,
8512 };
8513
8514 static struct amdgpu_i2c_adapter *
8515 create_i2c(struct ddc_service *ddc_service,
8516            int link_index,
8517            int *res)
8518 {
8519         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8520         struct amdgpu_i2c_adapter *i2c;
8521
8522         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8523         if (!i2c)
8524                 return NULL;
8525         i2c->base.owner = THIS_MODULE;
8526         i2c->base.class = I2C_CLASS_DDC;
8527         i2c->base.dev.parent = &adev->pdev->dev;
8528         i2c->base.algo = &amdgpu_dm_i2c_algo;
8529         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8530         i2c_set_adapdata(&i2c->base, i2c);
8531         i2c->ddc_service = ddc_service;
8532         if (i2c->ddc_service->ddc_pin)
8533                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8534
8535         return i2c;
8536 }
8537
8538
8539 /*
8540  * Note: this function assumes that dc_link_detect() was called for the
8541  * dc_link which will be represented by this aconnector.
8542  */
8543 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8544                                     struct amdgpu_dm_connector *aconnector,
8545                                     uint32_t link_index,
8546                                     struct amdgpu_encoder *aencoder)
8547 {
8548         int res = 0;
8549         int connector_type;
8550         struct dc *dc = dm->dc;
8551         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8552         struct amdgpu_i2c_adapter *i2c;
8553
8554         link->priv = aconnector;
8555
8556         DRM_DEBUG_DRIVER("%s()\n", __func__);
8557
8558         i2c = create_i2c(link->ddc, link->link_index, &res);
8559         if (!i2c) {
8560                 DRM_ERROR("Failed to create i2c adapter data\n");
8561                 return -ENOMEM;
8562         }
8563
8564         aconnector->i2c = i2c;
8565         res = i2c_add_adapter(&i2c->base);
8566
8567         if (res) {
8568                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8569                 goto out_free;
8570         }
8571
8572         connector_type = to_drm_connector_type(link->connector_signal);
8573
8574         res = drm_connector_init_with_ddc(
8575                         dm->ddev,
8576                         &aconnector->base,
8577                         &amdgpu_dm_connector_funcs,
8578                         connector_type,
8579                         &i2c->base);
8580
8581         if (res) {
8582                 DRM_ERROR("connector_init failed\n");
8583                 aconnector->connector_id = -1;
8584                 goto out_free;
8585         }
8586
8587         drm_connector_helper_add(
8588                         &aconnector->base,
8589                         &amdgpu_dm_connector_helper_funcs);
8590
8591         amdgpu_dm_connector_init_helper(
8592                 dm,
8593                 aconnector,
8594                 connector_type,
8595                 link,
8596                 link_index);
8597
8598         drm_connector_attach_encoder(
8599                 &aconnector->base, &aencoder->base);
8600
8601         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8602                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8603                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8604
8605 out_free:
8606         if (res) {
8607                 kfree(i2c);
8608                 aconnector->i2c = NULL;
8609         }
8610         return res;
8611 }
8612
8613 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8614 {
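             /*
              * Build a possible_crtcs mask with one bit per CRTC: for one to
              * six CRTCs this is GENMASK(num_crtc - 1, 0); anything else falls
              * back to the six-CRTC mask.
              */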
8615         switch (adev->mode_info.num_crtc) {
8616         case 1:
8617                 return 0x1;
8618         case 2:
8619                 return 0x3;
8620         case 3:
8621                 return 0x7;
8622         case 4:
8623                 return 0xf;
8624         case 5:
8625                 return 0x1f;
8626         case 6:
8627         default:
8628                 return 0x3f;
8629         }
8630 }
8631
8632 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8633                                   struct amdgpu_encoder *aencoder,
8634                                   uint32_t link_index)
8635 {
8636         struct amdgpu_device *adev = drm_to_adev(dev);
8637
8638         int res = drm_encoder_init(dev,
8639                                    &aencoder->base,
8640                                    &amdgpu_dm_encoder_funcs,
8641                                    DRM_MODE_ENCODER_TMDS,
8642                                    NULL);
8643
8644         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8645
8646         if (!res)
8647                 aencoder->encoder_id = link_index;
8648         else
8649                 aencoder->encoder_id = -1;
8650
8651         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8652
8653         return res;
8654 }
8655
8656 static void manage_dm_interrupts(struct amdgpu_device *adev,
8657                                  struct amdgpu_crtc *acrtc,
8658                                  bool enable)
8659 {
8660         /*
8661          * We have no guarantee that the frontend index maps to the same
8662          * backend index - some even map to more than one.
8663          *
8664          * TODO: Use a different interrupt or check DC itself for the mapping.
8665          */
8666         int irq_type =
8667                 amdgpu_display_crtc_idx_to_irq_type(
8668                         adev,
8669                         acrtc->crtc_id);
8670
8671         if (enable) {
8672                 drm_crtc_vblank_on(&acrtc->base);
8673                 amdgpu_irq_get(
8674                         adev,
8675                         &adev->pageflip_irq,
8676                         irq_type);
8677 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8678                 amdgpu_irq_get(
8679                         adev,
8680                         &adev->vline0_irq,
8681                         irq_type);
8682 #endif
8683         } else {
8684 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8685                 amdgpu_irq_put(
8686                         adev,
8687                         &adev->vline0_irq,
8688                         irq_type);
8689 #endif
8690                 amdgpu_irq_put(
8691                         adev,
8692                         &adev->pageflip_irq,
8693                         irq_type);
8694                 drm_crtc_vblank_off(&acrtc->base);
8695         }
8696 }
8697
8698 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8699                                       struct amdgpu_crtc *acrtc)
8700 {
8701         int irq_type =
8702                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8703
8704         /*
8705          * Read the current state for the IRQ and force-reapply
8706          * the setting to the hardware.
8707          */
8708         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8709 }
8710
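     /*
      * Return true if the scaling mode or the underscan borders changed
      * between the two connector states, in which case the commit code
      * must reprogram the stream's destination scaling.
      */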
8711 static bool
8712 is_scaling_state_different(const struct dm_connector_state *dm_state,
8713                            const struct dm_connector_state *old_dm_state)
8714 {
8715         if (dm_state->scaling != old_dm_state->scaling)
8716                 return true;
8717         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8718                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8719                         return true;
8720         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8721                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8722                         return true;
8723         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8724                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8725                 return true;
8726         return false;
8727 }
8728
8729 #ifdef CONFIG_DRM_AMD_DC_HDCP
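     /*
      * Walk the content-protection state transitions (UNDESIRED / DESIRED /
      * ENABLED, plus the HDCP content type) and return true when the caller
      * needs to (re)start HDCP for this connector.
      */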
8730 static bool is_content_protection_different(struct drm_connector_state *state,
8731                                             const struct drm_connector_state *old_state,
8732                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8733 {
8734         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8735         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8736
8737         /* Handle: Type0/1 change */
8738         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8739             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8740                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8741                 return true;
8742         }
8743
8744         /* CP is being re-enabled, ignore this
8745          *
8746          * Handles:     ENABLED -> DESIRED
8747          */
8748         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8749             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8750                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8751                 return false;
8752         }
8753
8754         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8755          *
8756          * Handles:     UNDESIRED -> ENABLED
8757          */
8758         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8759             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8760                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8761
8762         /* Stream removed and re-enabled
8763          *
8764          * Can sometimes overlap with the HPD case,
8765          * thus set update_hdcp to false to avoid
8766          * setting HDCP multiple times.
8767          *
8768          * Handles:     DESIRED -> DESIRED (Special case)
8769          */
8770         if (!(old_state->crtc && old_state->crtc->enabled) &&
8771                 state->crtc && state->crtc->enabled &&
8772                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8773                 dm_con_state->update_hdcp = false;
8774                 return true;
8775         }
8776
8777         /* Hot-plug, headless s3, dpms
8778          *
8779          * Only start HDCP if the display is connected/enabled.
8780          * update_hdcp flag will be set to false until the next
8781          * HPD comes in.
8782          *
8783          * Handles:     DESIRED -> DESIRED (Special case)
8784          */
8785         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8786             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8787                 dm_con_state->update_hdcp = false;
8788                 return true;
8789         }
8790
8791         /*
8792          * Handles:     UNDESIRED -> UNDESIRED
8793          *              DESIRED -> DESIRED
8794          *              ENABLED -> ENABLED
8795          */
8796         if (old_state->content_protection == state->content_protection)
8797                 return false;
8798
8799         /*
8800          * Handles:     UNDESIRED -> DESIRED
8801          *              DESIRED -> UNDESIRED
8802          *              ENABLED -> UNDESIRED
8803          */
8804         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8805                 return true;
8806
8807         /*
8808          * Handles:     DESIRED -> ENABLED
8809          */
8810         return false;
8811 }
8812
8813 #endif
8814 static void remove_stream(struct amdgpu_device *adev,
8815                           struct amdgpu_crtc *acrtc,
8816                           struct dc_stream_state *stream)
8817 {
8818         /* This is the update-mode case: the stream is being removed, so mark the CRTC idle. */
8819
8820         acrtc->otg_inst = -1;
8821         acrtc->enabled = false;
8822 }
8823
8824 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8825                                struct dc_cursor_position *position)
8826 {
8827         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8828         int x, y;
8829         int xorigin = 0, yorigin = 0;
8830
8831         if (!crtc || !plane->state->fb)
8832                 return 0;
8833
8834         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8835             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8836                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8837                           __func__,
8838                           plane->state->crtc_w,
8839                           plane->state->crtc_h);
8840                 return -EINVAL;
8841         }
8842
8843         x = plane->state->crtc_x;
8844         y = plane->state->crtc_y;
8845
8846         if (x <= -amdgpu_crtc->max_cursor_width ||
8847             y <= -amdgpu_crtc->max_cursor_height)
8848                 return 0;
8849
8850         if (x < 0) {
8851                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8852                 x = 0;
8853         }
8854         if (y < 0) {
8855                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8856                 y = 0;
8857         }
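             /*
              * Example: a 64x64 cursor at crtc_x = -16 yields xorigin = 16
              * and x = 0, i.e. DC draws the cursor at the screen edge with
              * the hotspot shifted 16 pixels into the cursor image.
              */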
8858         position->enable = true;
8859         position->translate_by_source = true;
8860         position->x = x;
8861         position->y = y;
8862         position->x_hotspot = xorigin;
8863         position->y_hotspot = yorigin;
8864
8865         return 0;
8866 }
8867
8868 static void handle_cursor_update(struct drm_plane *plane,
8869                                  struct drm_plane_state *old_plane_state)
8870 {
8871         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8872         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8873         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8874         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8875         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8876         uint64_t address = afb ? afb->address : 0;
8877         struct dc_cursor_position position = {0};
8878         struct dc_cursor_attributes attributes;
8879         int ret;
8880
8881         if (!plane->state->fb && !old_plane_state->fb)
8882                 return;
8883
8884         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8885                       __func__,
8886                       amdgpu_crtc->crtc_id,
8887                       plane->state->crtc_w,
8888                       plane->state->crtc_h);
8889
8890         ret = get_cursor_position(plane, crtc, &position);
8891         if (ret)
8892                 return;
8893
8894         if (!position.enable) {
8895                 /* turn off cursor */
8896                 if (crtc_state && crtc_state->stream) {
8897                         mutex_lock(&adev->dm.dc_lock);
8898                         dc_stream_set_cursor_position(crtc_state->stream,
8899                                                       &position);
8900                         mutex_unlock(&adev->dm.dc_lock);
8901                 }
8902                 return;
8903         }
8904
8905         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8906         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8907
8908         memset(&attributes, 0, sizeof(attributes));
8909         attributes.address.high_part = upper_32_bits(address);
8910         attributes.address.low_part  = lower_32_bits(address);
8911         attributes.width             = plane->state->crtc_w;
8912         attributes.height            = plane->state->crtc_h;
8913         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8914         attributes.rotation_angle    = 0;
8915         attributes.attribute_flags.value = 0;
8916
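             /* DRM pitches[] is in bytes, but DC expects the pitch in pixels. */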
8917         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8918
8919         if (crtc_state->stream) {
8920                 mutex_lock(&adev->dm.dc_lock);
8921                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8922                                                          &attributes))
8923                         DRM_ERROR("DC failed to set cursor attributes\n");
8924
8925                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8926                                                    &position))
8927                         DRM_ERROR("DC failed to set cursor position\n");
8928                 mutex_unlock(&adev->dm.dc_lock);
8929         }
8930 }
8931
8932 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8933 {
8934
8935         assert_spin_locked(&acrtc->base.dev->event_lock);
8936         WARN_ON(acrtc->event);
8937
8938         acrtc->event = acrtc->base.state->event;
8939
8940         /* Set the flip status */
8941         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8942
8943         /* Mark this event as consumed */
8944         acrtc->base.state->event = NULL;
8945
8946         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8947                      acrtc->crtc_id);
8948 }
8949
8950 static void update_freesync_state_on_stream(
8951         struct amdgpu_display_manager *dm,
8952         struct dm_crtc_state *new_crtc_state,
8953         struct dc_stream_state *new_stream,
8954         struct dc_plane_state *surface,
8955         u32 flip_timestamp_in_us)
8956 {
8957         struct mod_vrr_params vrr_params;
8958         struct dc_info_packet vrr_infopacket = {0};
8959         struct amdgpu_device *adev = dm->adev;
8960         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8961         unsigned long flags;
8962         bool pack_sdp_v1_3 = false;
8963
8964         if (!new_stream)
8965                 return;
8966
8967         /*
8968          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8969          * For now it's sufficient to just guard against these conditions.
8970          */
8971
8972         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8973                 return;
8974
8975         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8976         vrr_params = acrtc->dm_irq_params.vrr_params;
8977
8978         if (surface) {
8979                 mod_freesync_handle_preflip(
8980                         dm->freesync_module,
8981                         surface,
8982                         new_stream,
8983                         flip_timestamp_in_us,
8984                         &vrr_params);
8985
8986                 if (adev->family < AMDGPU_FAMILY_AI &&
8987                     amdgpu_dm_vrr_active(new_crtc_state)) {
8988                         mod_freesync_handle_v_update(dm->freesync_module,
8989                                                      new_stream, &vrr_params);
8990
8991                         /* Need to call this before the frame ends. */
8992                         dc_stream_adjust_vmin_vmax(dm->dc,
8993                                                    new_crtc_state->stream,
8994                                                    &vrr_params.adjust);
8995                 }
8996         }
8997
8998         mod_freesync_build_vrr_infopacket(
8999                 dm->freesync_module,
9000                 new_stream,
9001                 &vrr_params,
9002                 PACKET_TYPE_VRR,
9003                 TRANSFER_FUNC_UNKNOWN,
9004                 &vrr_infopacket,
9005                 pack_sdp_v1_3);
9006
9007         new_crtc_state->freesync_timing_changed |=
9008                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9009                         &vrr_params.adjust,
9010                         sizeof(vrr_params.adjust)) != 0);
9011
9012         new_crtc_state->freesync_vrr_info_changed |=
9013                 (memcmp(&new_crtc_state->vrr_infopacket,
9014                         &vrr_infopacket,
9015                         sizeof(vrr_infopacket)) != 0);
9016
9017         acrtc->dm_irq_params.vrr_params = vrr_params;
9018         new_crtc_state->vrr_infopacket = vrr_infopacket;
9019
9020         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9021         new_stream->vrr_infopacket = vrr_infopacket;
9022
9023         if (new_crtc_state->freesync_vrr_info_changed)
9024                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9025                               new_crtc_state->base.crtc->base.id,
9026                               (int)new_crtc_state->base.vrr_enabled,
9027                               (int)vrr_params.state);
9028
9029         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9030 }
9031
9032 static void update_stream_irq_parameters(
9033         struct amdgpu_display_manager *dm,
9034         struct dm_crtc_state *new_crtc_state)
9035 {
9036         struct dc_stream_state *new_stream = new_crtc_state->stream;
9037         struct mod_vrr_params vrr_params;
9038         struct mod_freesync_config config = new_crtc_state->freesync_config;
9039         struct amdgpu_device *adev = dm->adev;
9040         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9041         unsigned long flags;
9042
9043         if (!new_stream)
9044                 return;
9045
9046         /*
9047          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9048          * For now it's sufficient to just guard against these conditions.
9049          */
9050         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9051                 return;
9052
9053         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9054         vrr_params = acrtc->dm_irq_params.vrr_params;
9055
9056         if (new_crtc_state->vrr_supported &&
9057             config.min_refresh_in_uhz &&
9058             config.max_refresh_in_uhz) {
9059                 /*
9060                  * if freesync compatible mode was set, config.state will be set
9061                  * in atomic check
9062                  */
9063                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9064                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9065                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9066                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9067                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9068                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9069                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9070                 } else {
9071                         config.state = new_crtc_state->base.vrr_enabled ?
9072                                                      VRR_STATE_ACTIVE_VARIABLE :
9073                                                      VRR_STATE_INACTIVE;
9074                 }
9075         } else {
9076                 config.state = VRR_STATE_UNSUPPORTED;
9077         }
9078
9079         mod_freesync_build_vrr_params(dm->freesync_module,
9080                                       new_stream,
9081                                       &config, &vrr_params);
9082
9083         new_crtc_state->freesync_timing_changed |=
9084                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9085                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9086
9087         new_crtc_state->freesync_config = config;
9088         /* Copy state for access from DM IRQ handler */
9089         acrtc->dm_irq_params.freesync_config = config;
9090         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9091         acrtc->dm_irq_params.vrr_params = vrr_params;
9092         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9093 }
9094
9095 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9096                                             struct dm_crtc_state *new_state)
9097 {
9098         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9099         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9100
9101         if (!old_vrr_active && new_vrr_active) {
9102                 /* Transition VRR inactive -> active:
9103                  * While VRR is active, we must not disable the vblank irq, as
9104                  * a re-enable after a disable would compute bogus vblank/pflip
9105                  * timestamps if it happened inside the display front porch.
9106                  *
9107                  * We also need vupdate irq for the actual core vblank handling
9108                  * at end of vblank.
9109                  */
9110                 dm_set_vupdate_irq(new_state->base.crtc, true);
9111                 drm_crtc_vblank_get(new_state->base.crtc);
9112                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9113                                  __func__, new_state->base.crtc->base.id);
9114         } else if (old_vrr_active && !new_vrr_active) {
9115                 /* Transition VRR active -> inactive:
9116                  * Allow vblank irq disable again for fixed refresh rate.
9117                  */
9118                 dm_set_vupdate_irq(new_state->base.crtc, false);
9119                 drm_crtc_vblank_put(new_state->base.crtc);
9120                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9121                                  __func__, new_state->base.crtc->base.id);
9122         }
9123 }
9124
9125 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9126 {
9127         struct drm_plane *plane;
9128         struct drm_plane_state *old_plane_state;
9129         int i;
9130
9131         /*
9132          * TODO: Make this per-stream so we don't issue redundant updates for
9133          * commits with multiple streams.
9134          */
9135         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9136                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9137                         handle_cursor_update(plane, old_plane_state);
9138 }
9139
9140 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9141                                     struct dc_state *dc_state,
9142                                     struct drm_device *dev,
9143                                     struct amdgpu_display_manager *dm,
9144                                     struct drm_crtc *pcrtc,
9145                                     bool wait_for_vblank)
9146 {
9147         uint32_t i;
9148         uint64_t timestamp_ns;
9149         struct drm_plane *plane;
9150         struct drm_plane_state *old_plane_state, *new_plane_state;
9151         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9152         struct drm_crtc_state *new_pcrtc_state =
9153                         drm_atomic_get_new_crtc_state(state, pcrtc);
9154         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9155         struct dm_crtc_state *dm_old_crtc_state =
9156                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9157         int planes_count = 0, vpos, hpos;
9158         long r;
9159         unsigned long flags;
9160         struct amdgpu_bo *abo;
9161         uint32_t target_vblank, last_flip_vblank;
9162         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9163         bool pflip_present = false;
9164         struct {
9165                 struct dc_surface_update surface_updates[MAX_SURFACES];
9166                 struct dc_plane_info plane_infos[MAX_SURFACES];
9167                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9168                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9169                 struct dc_stream_update stream_update;
9170         } *bundle;
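             /*
              * The bundle carries MAX_SURFACES copies of several update
              * structs, far too large for the kernel stack, hence the heap
              * allocation below.
              */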
9171
9172         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9173
9174         if (!bundle) {
9175                 dm_error("Failed to allocate update bundle\n");
9176                 goto cleanup;
9177         }
9178
9179         /*
9180          * Disable the cursor first if we're disabling all the planes.
9181          * It'll remain on the screen after the planes are re-enabled
9182          * if we don't.
9183          */
9184         if (acrtc_state->active_planes == 0)
9185                 amdgpu_dm_commit_cursors(state);
9186
9187         /* update planes when needed */
9188         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9189                 struct drm_crtc *crtc = new_plane_state->crtc;
9190                 struct drm_crtc_state *new_crtc_state;
9191                 struct drm_framebuffer *fb = new_plane_state->fb;
9192                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9193                 bool plane_needs_flip;
9194                 struct dc_plane_state *dc_plane;
9195                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9196
9197                 /* Cursor plane is handled after stream updates */
9198                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9199                         continue;
9200
9201                 if (!fb || !crtc || pcrtc != crtc)
9202                         continue;
9203
9204                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9205                 if (!new_crtc_state->active)
9206                         continue;
9207
9208                 dc_plane = dm_new_plane_state->dc_state;
9209
9210                 bundle->surface_updates[planes_count].surface = dc_plane;
9211                 if (new_pcrtc_state->color_mgmt_changed) {
9212                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9213                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9214                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9215                 }
9216
9217                 fill_dc_scaling_info(dm->adev, new_plane_state,
9218                                      &bundle->scaling_infos[planes_count]);
9219
9220                 bundle->surface_updates[planes_count].scaling_info =
9221                         &bundle->scaling_infos[planes_count];
9222
9223                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9224
9225                 pflip_present = pflip_present || plane_needs_flip;
9226
9227                 if (!plane_needs_flip) {
9228                         planes_count += 1;
9229                         continue;
9230                 }
9231
9232                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9233
9234                 /*
9235                  * Wait for all fences on this FB. Do limited wait to avoid
9236                  * deadlock during GPU reset when this fence will not signal
9237                  * but we hold reservation lock for the BO.
9238                  */
9239                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9240                                           msecs_to_jiffies(5000));
9241                 if (unlikely(r <= 0))
9242                         DRM_ERROR("Waiting for fences timed out!\n");
9243
9244                 fill_dc_plane_info_and_addr(
9245                         dm->adev, new_plane_state,
9246                         afb->tiling_flags,
9247                         &bundle->plane_infos[planes_count],
9248                         &bundle->flip_addrs[planes_count].address,
9249                         afb->tmz_surface, false);
9250
9251                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9252                                  new_plane_state->plane->index,
9253                                  bundle->plane_infos[planes_count].dcc.enable);
9254
9255                 bundle->surface_updates[planes_count].plane_info =
9256                         &bundle->plane_infos[planes_count];
9257
9258                 /*
9259                  * Only allow immediate flips for fast updates that don't
9260                  * change FB pitch, DCC state, rotation or mirroring.
9261                  */
9262                 bundle->flip_addrs[planes_count].flip_immediate =
9263                         crtc->state->async_flip &&
9264                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9265
9266                 timestamp_ns = ktime_get_ns();
9267                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9268                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9270
9271                 if (!bundle->surface_updates[planes_count].surface) {
9272                         DRM_ERROR("No surface for CRTC: id=%d\n",
9273                                         acrtc_attach->crtc_id);
9274                         continue;
9275                 }
9276
9277                 if (plane == pcrtc->primary)
9278                         update_freesync_state_on_stream(
9279                                 dm,
9280                                 acrtc_state,
9281                                 acrtc_state->stream,
9282                                 dc_plane,
9283                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9284
9285                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9286                                  __func__,
9287                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9288                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9289
9290                 planes_count += 1;
9291
9292         }
9293
9294         if (pflip_present) {
9295                 if (!vrr_active) {
9296                         /* Use old throttling in non-vrr fixed refresh rate mode
9297                          * to keep flip scheduling based on target vblank counts
9298                          * working in a backwards compatible way, e.g., for
9299                          * clients using the GLX_OML_sync_control extension or
9300                          * DRI3/Present extension with defined target_msc.
9301                          */
9302                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9303                 } else {
9305                         /* For variable refresh rate mode only:
9306                          * Get vblank of last completed flip to avoid > 1 vrr
9307                          * flips per video frame by use of throttling, but allow
9308                          * flip programming anywhere in the possibly large
9309                          * variable vrr vblank interval for fine-grained flip
9310                          * timing control and more opportunity to avoid stutter
9311                          * on late submission of flips.
9312                          */
9313                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9314                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9315                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9316                 }
9317
9318                 target_vblank = last_flip_vblank + wait_for_vblank;
9319
9320                 /*
9321                  * Wait until we're out of the vblank before the one targeted by the
9322                  * flip; the signed (int) delta stays correct across counter wraparound.
9323                  */
9324                 while ((acrtc_attach->enabled &&
9325                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9326                                                             0, &vpos, &hpos, NULL,
9327                                                             NULL, &pcrtc->hwmode)
9328                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9329                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9330                         (int)(target_vblank -
9331                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9332                         usleep_range(1000, 1100);
9333                 }
9334
9335                 /*
9336                  * Prepare the flip event for the pageflip interrupt to handle.
9337                  *
9338                  * This only works in the case where we've already turned on the
9339                  * appropriate hardware blocks (e.g. HUBP), so in the transition
9340                  * from 0 -> n planes we have to skip a hardware generated event
9341                  * and rely on sending it from software.
9342                  */
9343                 if (acrtc_attach->base.state->event &&
9344                     acrtc_state->active_planes > 0 &&
9345                     !acrtc_state->force_dpms_off) {
9346                         drm_crtc_vblank_get(pcrtc);
9347
9348                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9349
9350                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9351                         prepare_flip_isr(acrtc_attach);
9352
9353                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9354                 }
9355
9356                 if (acrtc_state->stream) {
9357                         if (acrtc_state->freesync_vrr_info_changed)
9358                                 bundle->stream_update.vrr_infopacket =
9359                                         &acrtc_state->stream->vrr_infopacket;
9360                 }
9361         }
9362
9363         /* Update the planes if changed or disable if we don't have any. */
9364         if ((planes_count || acrtc_state->active_planes == 0) &&
9365                 acrtc_state->stream) {
9366 #if defined(CONFIG_DRM_AMD_DC_DCN)
9367                 /*
9368                  * If PSR or idle optimizations are enabled then flush out
9369                  * any pending work before hardware programming.
9370                  */
9371                 if (dm->vblank_control_workqueue)
9372                         flush_workqueue(dm->vblank_control_workqueue);
9373 #endif
9374
9375                 bundle->stream_update.stream = acrtc_state->stream;
9376                 if (new_pcrtc_state->mode_changed) {
9377                         bundle->stream_update.src = acrtc_state->stream->src;
9378                         bundle->stream_update.dst = acrtc_state->stream->dst;
9379                 }
9380
9381                 if (new_pcrtc_state->color_mgmt_changed) {
9382                         /*
9383                          * TODO: This isn't fully correct since we've actually
9384                          * already modified the stream in place.
9385                          */
9386                         bundle->stream_update.gamut_remap =
9387                                 &acrtc_state->stream->gamut_remap_matrix;
9388                         bundle->stream_update.output_csc_transform =
9389                                 &acrtc_state->stream->csc_color_matrix;
9390                         bundle->stream_update.out_transfer_func =
9391                                 acrtc_state->stream->out_transfer_func;
9392                 }
9393
9394                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9395                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9396                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9397
9398                 /*
9399                  * If FreeSync state on the stream has changed then we need to
9400                  * re-adjust the min/max bounds now that DC doesn't handle this
9401                  * as part of commit.
9402                  */
9403                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9404                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9405                         dc_stream_adjust_vmin_vmax(
9406                                 dm->dc, acrtc_state->stream,
9407                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9408                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9409                 }
9410                 mutex_lock(&dm->dc_lock);
9411                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9412                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9413                         amdgpu_dm_psr_disable(acrtc_state->stream);
9414
9415                 dc_commit_updates_for_stream(dm->dc,
9416                                                      bundle->surface_updates,
9417                                                      planes_count,
9418                                                      acrtc_state->stream,
9419                                                      &bundle->stream_update,
9420                                                      dc_state);
9421
9422                 /*
9423                  * Enable or disable the interrupts on the backend.
9424                  *
9425                  * Most pipes are put into power gating when unused.
9426                  *
9427                  * A pipe that has been power gated loses its interrupt
9428                  * enablement state by the time power gating is lifted.
9429                  *
9430                  * So we need to update the IRQ control state in hardware
9431                  * whenever the pipe turns on (since it could have been
9432                  * power gated) or off (since some pipes can't be power gated
9433                  * on some ASICs).
9434                  */
9435                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9436                         dm_update_pflip_irq_state(drm_to_adev(dev),
9437                                                   acrtc_attach);
9438
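                /*
                 * The panel advertises PSR support but the feature has not
                 * been enabled on the link yet, so do the one-time PSR link
                 * setup on this full (non-fast) update.
                 */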
9439                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9440                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9441                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9442                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9443
9444                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9445                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9446                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9447                         struct amdgpu_dm_connector *aconn =
9448                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9449
9450                         if (aconn->psr_skip_count > 0)
9451                                 aconn->psr_skip_count--;
9452
9453                         /* Allow PSR when skip count is 0. */
9454                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9455                 } else {
9456                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9457                 }
9458
9459                 mutex_unlock(&dm->dc_lock);
9460         }
9461
9462         /*
9463          * Update cursor state *after* programming all the planes.
9464          * This avoids redundant programming in the case where we're
9465          * disabling a single plane - those pipes are being disabled anyway.
9466          */
9467         if (acrtc_state->active_planes)
9468                 amdgpu_dm_commit_cursors(state);
9469
9470 cleanup:
9471         kfree(bundle);
9472 }
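
/*
 * Illustrative sketch (not part of the driver): how the flip throttling in
 * amdgpu_dm_commit_planes() above picks its target vblank. In fixed refresh
 * mode the baseline is the current vblank counter; in VRR mode it is the
 * vblank of the last completed flip, so at most one flip completes per
 * refresh cycle while the flip itself may still be programmed anywhere in
 * the long VRR vblank. The helper name and parameters are hypothetical;
 * wait_for_vblank is 0 or 1 as in the commit path above.
 */
static inline u32 example_flip_target_vblank(bool vrr_active, u32 current_vblank,
					     u32 last_flip_vblank, u32 wait_for_vblank)
{
	/* Baseline for throttling: last completed flip (VRR) or "now" (fixed). */
	u32 base = vrr_active ? last_flip_vblank : current_vblank;

	/* The commit path busy-waits until the counter passes this target. */
	return base + wait_for_vblank;
}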
9473
9474 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9475                                    struct drm_atomic_state *state)
9476 {
9477         struct amdgpu_device *adev = drm_to_adev(dev);
9478         struct amdgpu_dm_connector *aconnector;
9479         struct drm_connector *connector;
9480         struct drm_connector_state *old_con_state, *new_con_state;
9481         struct drm_crtc_state *new_crtc_state;
9482         struct dm_crtc_state *new_dm_crtc_state;
9483         const struct dc_stream_status *status;
9484         int i, inst;
9485
9486         /* Notify audio device removals. */
9487         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9488                 if (old_con_state->crtc != new_con_state->crtc) {
9489                         /* CRTC changes require notification. */
9490                         goto notify;
9491                 }
9492
9493                 if (!new_con_state->crtc)
9494                         continue;
9495
9496                 new_crtc_state = drm_atomic_get_new_crtc_state(
9497                         state, new_con_state->crtc);
9498
9499                 if (!new_crtc_state)
9500                         continue;
9501
9502                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9503                         continue;
9504
9505         notify:
9506                 aconnector = to_amdgpu_dm_connector(connector);
9507
9508                 mutex_lock(&adev->dm.audio_lock);
9509                 inst = aconnector->audio_inst;
9510                 aconnector->audio_inst = -1;
9511                 mutex_unlock(&adev->dm.audio_lock);
9512
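                /*
                 * The ELD notify calls into the registered audio component,
                 * so it is issued after dropping dm.audio_lock.
                 */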
9513                 amdgpu_dm_audio_eld_notify(adev, inst);
9514         }
9515
9516         /* Notify audio device additions. */
9517         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9518                 if (!new_con_state->crtc)
9519                         continue;
9520
9521                 new_crtc_state = drm_atomic_get_new_crtc_state(
9522                         state, new_con_state->crtc);
9523
9524                 if (!new_crtc_state)
9525                         continue;
9526
9527                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9528                         continue;
9529
9530                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9531                 if (!new_dm_crtc_state->stream)
9532                         continue;
9533
9534                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9535                 if (!status)
9536                         continue;
9537
9538                 aconnector = to_amdgpu_dm_connector(connector);
9539
9540                 mutex_lock(&adev->dm.audio_lock);
9541                 inst = status->audio_inst;
9542                 aconnector->audio_inst = inst;
9543                 mutex_unlock(&adev->dm.audio_lock);
9544
9545                 amdgpu_dm_audio_eld_notify(adev, inst);
9546         }
9547 }
9548
9549 /**
9550  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9551  * @crtc_state: the DRM CRTC state
9552  * @stream_state: the DC stream state.
9553  *
9554  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9555  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9556  */
9557 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9558                                                 struct dc_stream_state *stream_state)
9559 {
9560         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9561 }
9562
9563 /**
9564  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9565  * @state: The atomic state to commit
9566  *
9567  * This will tell DC to commit the constructed DC state from atomic_check,
9568  * programming the hardware. Any failure here implies a hardware failure, since
9569  * atomic check should have filtered anything non-kosher.
9570  */
9571 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9572 {
9573         struct drm_device *dev = state->dev;
9574         struct amdgpu_device *adev = drm_to_adev(dev);
9575         struct amdgpu_display_manager *dm = &adev->dm;
9576         struct dm_atomic_state *dm_state;
9577         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9578         uint32_t i, j;
9579         struct drm_crtc *crtc;
9580         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9581         unsigned long flags;
9582         bool wait_for_vblank = true;
9583         struct drm_connector *connector;
9584         struct drm_connector_state *old_con_state, *new_con_state;
9585         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9586         int crtc_disable_count = 0;
9587         bool mode_set_reset_required = false;
9588
9589         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9590
9591         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9592
9593         dm_state = dm_atomic_get_new_state(state);
9594         if (dm_state && dm_state->context) {
9595                 dc_state = dm_state->context;
9596         } else {
9597                 /* No state changes, retain current state. */
9598                 dc_state_temp = dc_create_state(dm->dc);
9599                 ASSERT(dc_state_temp);
9600                 dc_state = dc_state_temp;
9601                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9602         }
9603
9604         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9605                                        new_crtc_state, i) {
9606                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9607
9608                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9609
9610                 if (old_crtc_state->active &&
9611                     (!new_crtc_state->active ||
9612                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9613                         manage_dm_interrupts(adev, acrtc, false);
9614                         dc_stream_release(dm_old_crtc_state->stream);
9615                 }
9616         }
9617
9618         drm_atomic_helper_calc_timestamping_constants(state);
9619
9620         /* update changed items */
9621         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9622                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9623
9624                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9625                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9626
9627                 DRM_DEBUG_ATOMIC(
9628                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9629                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9630                         "connectors_changed:%d\n",
9631                         acrtc->crtc_id,
9632                         new_crtc_state->enable,
9633                         new_crtc_state->active,
9634                         new_crtc_state->planes_changed,
9635                         new_crtc_state->mode_changed,
9636                         new_crtc_state->active_changed,
9637                         new_crtc_state->connectors_changed);
9638
9639                 /* Disable cursor if disabling crtc */
9640                 if (old_crtc_state->active && !new_crtc_state->active) {
9641                         struct dc_cursor_position position;
9642
9643                         memset(&position, 0, sizeof(position));
9644                         mutex_lock(&dm->dc_lock);
9645                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9646                         mutex_unlock(&dm->dc_lock);
9647                 }
9648
9649                 /* Copy all transient state flags into dc state */
9650                 if (dm_new_crtc_state->stream) {
9651                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9652                                                             dm_new_crtc_state->stream);
9653                 }
9654
9655                 /* handles headless hotplug case, updating new_state and
9656                  * aconnector as needed
9657                  */
9658
9659                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9660
9661                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9662
9663                         if (!dm_new_crtc_state->stream) {
9664                                 /*
9665                                  * This can happen because of issues with
9666                                  * userspace notification delivery:
9667                                  * userspace tries to set a mode on a
9668                                  * display that is in fact disconnected, so
9669                                  * dc_sink is NULL on the aconnector.
9670                                  * We expect a mode reset to come soon.
9671                                  *
9672                                  * It can also happen when an unplug occurs
9673                                  * during the resume sequence.
9674                                  *
9675                                  * In either case we want to pretend we still
9676                                  * have a sink, to keep the pipe running so that
9677                                  * hw state stays consistent with sw state.
9678                                  */
9679                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9680                                                 __func__, acrtc->base.base.id);
9681                                 continue;
9682                         }
9683
9684                         if (dm_old_crtc_state->stream)
9685                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9686
9687                         pm_runtime_get_noresume(dev->dev);
9688
9689                         acrtc->enabled = true;
9690                         acrtc->hw_mode = new_crtc_state->mode;
9691                         crtc->hwmode = new_crtc_state->mode;
9692                         mode_set_reset_required = true;
9693                 } else if (modereset_required(new_crtc_state)) {
9694                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9695                         /* i.e. reset mode */
9696                         if (dm_old_crtc_state->stream)
9697                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9698
9699                         mode_set_reset_required = true;
9700                 }
9701         } /* for_each_crtc_in_state() */
9702
9703         if (dc_state) {
9704                 /* if there is a mode set or reset, disable eDP PSR */
9705                 if (mode_set_reset_required) {
9706 #if defined(CONFIG_DRM_AMD_DC_DCN)
9707                         if (dm->vblank_control_workqueue)
9708                                 flush_workqueue(dm->vblank_control_workqueue);
9709 #endif
9710                         amdgpu_dm_psr_disable_all(dm);
9711                 }
9712
9713                 dm_enable_per_frame_crtc_master_sync(dc_state);
9714                 mutex_lock(&dm->dc_lock);
9715                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9716 #if defined(CONFIG_DRM_AMD_DC_DCN)
9717                 /* Allow idle optimization when vblank count is 0 for display off */
9718                 if (dm->active_vblank_irq_count == 0)
9719                         dc_allow_idle_optimizations(dm->dc, true);
9720 #endif
9721                 mutex_unlock(&dm->dc_lock);
9722         }
9723
9724         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9725                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9726
9727                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9728
9729                 if (dm_new_crtc_state->stream != NULL) {
9730                         const struct dc_stream_status *status =
9731                                         dc_stream_get_status(dm_new_crtc_state->stream);
9732
9733                         if (!status)
9734                                 status = dc_stream_get_status_from_state(dc_state,
9735                                                                          dm_new_crtc_state->stream);
9736                         if (!status)
9737                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9738                         else
9739                                 acrtc->otg_inst = status->primary_otg_inst;
9740                 }
9741         }
9742 #ifdef CONFIG_DRM_AMD_DC_HDCP
9743         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9744                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9745                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9746                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9747
9748                 new_crtc_state = NULL;
9749
9750                 if (acrtc)
9751                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9752
9753                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9754
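                /*
                 * If the stream is going away while content protection is
                 * ENABLED, reset HDCP on the link and fall back to DESIRED so
                 * protection can be re-negotiated once a stream returns.
                 */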
9755                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9756                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9757                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9758                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9759                         dm_new_con_state->update_hdcp = true;
9760                         continue;
9761                 }
9762
9763                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9764                         hdcp_update_display(
9765                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9766                                 new_con_state->hdcp_content_type,
9767                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9768         }
9769 #endif
9770
9771         /* Handle connector state changes */
9772         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9773                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9774                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9775                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9776                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9777                 struct dc_stream_update stream_update;
9778                 struct dc_info_packet hdr_packet;
9779                 struct dc_stream_status *status = NULL;
9780                 bool abm_changed, hdr_changed, scaling_changed;
9781
9782                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9783                 memset(&stream_update, 0, sizeof(stream_update));
9784
9785                 if (acrtc) {
9786                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9787                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9788                 }
9789
9790                 /* Skip any modesets/resets */
9791                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9792                         continue;
9793
9794                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9795                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9796
9797                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9798                                                              dm_old_con_state);
9799
9800                 abm_changed = dm_new_crtc_state->abm_level !=
9801                               dm_old_crtc_state->abm_level;
9802
9803                 hdr_changed =
9804                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9805
9806                 if (!scaling_changed && !abm_changed && !hdr_changed)
9807                         continue;
9808
9809                 stream_update.stream = dm_new_crtc_state->stream;
9810                 if (scaling_changed) {
9811                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9812                                         dm_new_con_state, dm_new_crtc_state->stream);
9813
9814                         stream_update.src = dm_new_crtc_state->stream->src;
9815                         stream_update.dst = dm_new_crtc_state->stream->dst;
9816                 }
9817
9818                 if (abm_changed) {
9819                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9820
9821                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9822                 }
9823
9824                 if (hdr_changed) {
9825                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9826                         stream_update.hdr_static_metadata = &hdr_packet;
9827                 }
9828
9829                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9830
9831                 if (WARN_ON(!status))
9832                         continue;
9833
9834                 WARN_ON(!status->plane_count);
9835
9836                 /*
9837                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9838                  * Here we create an empty update on each plane.
9839                  * To fix this, DC should permit updating only stream properties.
9840                  */
9841                 for (j = 0; j < status->plane_count; j++)
9842                         dummy_updates[j].surface = status->plane_states[0];
9843
9845                 mutex_lock(&dm->dc_lock);
9846                 dc_commit_updates_for_stream(dm->dc,
9847                                                      dummy_updates,
9848                                                      status->plane_count,
9849                                                      dm_new_crtc_state->stream,
9850                                                      &stream_update,
9851                                                      dc_state);
9852                 mutex_unlock(&dm->dc_lock);
9853         }
9854
9855         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9856         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9857                                       new_crtc_state, i) {
9858                 if (old_crtc_state->active && !new_crtc_state->active)
9859                         crtc_disable_count++;
9860
9861                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9862                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9863
9864                 /* Update freesync config on the crtc state and the irq params */
9865                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9866
9867                 /* Handle vrr on->off / off->on transitions */
9868                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9869                                                 dm_new_crtc_state);
9870         }
9871
9872         /*
9873          * Enable interrupts for CRTCs that are newly enabled or went through
9874          * a modeset. This is intentionally deferred until after the front end
9875          * state is modified, so that the OTG is on before the IRQ handlers
9876          * run and they don't access stale or invalid state.
9877          */
9878         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9879                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9880 #ifdef CONFIG_DEBUG_FS
9881                 bool configure_crc = false;
9882                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9883 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9884                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9885 #endif
9886                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9887                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9888                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9889 #endif
9890                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9891
9892                 if (new_crtc_state->active &&
9893                     (!old_crtc_state->active ||
9894                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9895                         dc_stream_retain(dm_new_crtc_state->stream);
9896                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9897                         manage_dm_interrupts(adev, acrtc, true);
9898
9899 #ifdef CONFIG_DEBUG_FS
9900                         /*
9901                          * Frontend may have changed so reapply the CRC capture
9902                          * settings for the stream.
9903                          */
9904                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9905
9906                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9907                                 configure_crc = true;
9908 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9909                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9910                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9911                                         acrtc->dm_irq_params.crc_window.update_win = true;
9912                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9913                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9914                                         crc_rd_wrk->crtc = crtc;
9915                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9916                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9917                                 }
9918 #endif
9919                         }
9920
9921                         if (configure_crc)
9922                                 if (amdgpu_dm_crtc_configure_crc_source(
9923                                         crtc, dm_new_crtc_state, cur_crc_src))
9924                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9925 #endif
9926                 }
9927         }
9928
9929         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9930                 if (new_crtc_state->async_flip)
9931                         wait_for_vblank = false;
9932
9933         /* update planes when needed per crtc */
9934         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9935                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9936
9937                 if (dm_new_crtc_state->stream)
9938                         amdgpu_dm_commit_planes(state, dc_state, dev,
9939                                                 dm, crtc, wait_for_vblank);
9940         }
9941
9942         /* Update audio instances for each connector. */
9943         amdgpu_dm_commit_audio(dev, state);
9944
9945 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9946         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9947         /* restore the backlight level */
9948         for (i = 0; i < dm->num_of_edps; i++) {
9949                 if (dm->backlight_dev[i] &&
9950                     (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9951                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9952         }
9953 #endif
9954         /*
9955          * Send vblank events for all CRTCs not handled in the flip path and
9956          * mark their events consumed for drm_atomic_helper_commit_hw_done().
9957          */
9958         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9959         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9960
9961                 if (new_crtc_state->event)
9962                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9963
9964                 new_crtc_state->event = NULL;
9965         }
9966         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9967
9968         /* Signal HW programming completion */
9969         drm_atomic_helper_commit_hw_done(state);
9970
9971         if (wait_for_vblank)
9972                 drm_atomic_helper_wait_for_flip_done(dev, state);
9973
9974         drm_atomic_helper_cleanup_planes(dev, state);
9975
9976         /* return the stolen vga memory back to VRAM */
9977         if (!adev->mman.keep_stolen_vga_memory)
9978                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9979         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9980
9981         /*
9982          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9983          * so we can put the GPU into runtime suspend if we're not driving any
9984          * displays anymore
9985          */
9986         for (i = 0; i < crtc_disable_count; i++)
9987                 pm_runtime_put_autosuspend(dev->dev);
9988         pm_runtime_mark_last_busy(dev->dev);
9989
9990         if (dc_state_temp)
9991                 dc_release_state(dc_state_temp);
9992 }
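
/*
 * For reference, a minimal commit tail built purely from the generic DRM
 * helpers looks like the sketch below; amdgpu_dm_atomic_commit_tail() above
 * follows the same skeleton but routes the stream/plane programming through
 * DC and adds DM-specific steps (PSR, CRC, audio, HDCP). Illustrative only;
 * the function name is hypothetical.
 */
static inline void example_generic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	/* Program the hardware: disables first, then planes, then enables. */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Signal completion, wait for the flips, drop plane references. */
	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_wait_for_flip_done(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}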
9993
9995 static int dm_force_atomic_commit(struct drm_connector *connector)
9996 {
9997         int ret = 0;
9998         struct drm_device *ddev = connector->dev;
9999         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10000         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10001         struct drm_plane *plane = disconnected_acrtc->base.primary;
10002         struct drm_connector_state *conn_state;
10003         struct drm_crtc_state *crtc_state;
10004         struct drm_plane_state *plane_state;
10005
10006         if (!state)
10007                 return -ENOMEM;
10008
10009         state->acquire_ctx = ddev->mode_config.acquire_ctx;
10010
10011         /* Construct an atomic state to restore previous display settings */
10012
10013         /*
10014          * Attach connectors to drm_atomic_state
10015          */
10016         conn_state = drm_atomic_get_connector_state(state, connector);
10017
10018         ret = PTR_ERR_OR_ZERO(conn_state);
10019         if (ret)
10020                 goto out;
10021
10022         /* Attach crtc to drm_atomic_state */
10023         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10024
10025         ret = PTR_ERR_OR_ZERO(crtc_state);
10026         if (ret)
10027                 goto out;
10028
10029         /* force a restore */
10030         crtc_state->mode_changed = true;
10031
10032         /* Attach plane to drm_atomic_state */
10033         plane_state = drm_atomic_get_plane_state(state, plane);
10034
10035         ret = PTR_ERR_OR_ZERO(plane_state);
10036         if (ret)
10037                 goto out;
10038
10039         /* Call commit internally with the state we just constructed */
10040         ret = drm_atomic_commit(state);
10041
10042 out:
10043         drm_atomic_state_put(state);
10044         if (ret)
10045                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10046
10047         return ret;
10048 }
10049
10050 /*
10051  * This function handles all cases when set mode does not come upon hotplug.
10052  * This includes when a display is unplugged then plugged back into the
10053  * same port and when running without usermode desktop manager support.
10054  */
10055 void dm_restore_drm_connector_state(struct drm_device *dev,
10056                                     struct drm_connector *connector)
10057 {
10058         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10059         struct amdgpu_crtc *disconnected_acrtc;
10060         struct dm_crtc_state *acrtc_state;
10061
10062         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10063                 return;
10064
10065         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10066         if (!disconnected_acrtc)
10067                 return;
10068
10069         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10070         if (!acrtc_state->stream)
10071                 return;
10072
10073         /*
10074          * If the previous sink is not released and different from the current,
10075          * we deduce we are in a state where we cannot rely on a usermode call
10076          * to turn on the display, so we do it here.
10077          */
10078         if (acrtc_state->stream->sink != aconnector->dc_sink)
10079                 dm_force_atomic_commit(&aconnector->base);
10080 }
10081
10082 /*
10083  * Grabs all modesetting locks to serialize against any blocking commits.
10084  * Waits for completion of all non-blocking commits.
10085  */
10086 static int do_aquire_global_lock(struct drm_device *dev,
10087                                  struct drm_atomic_state *state)
10088 {
10089         struct drm_crtc *crtc;
10090         struct drm_crtc_commit *commit;
10091         long ret;
10092
10093         /*
10094          * Adding all modeset locks to acquire_ctx will
10095          * ensure that when the framework releases them the
10096          * extra locks we are locking here will get released too.
10097          */
10098         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10099         if (ret)
10100                 return ret;
10101
10102         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10103                 spin_lock(&crtc->commit_lock);
10104                 commit = list_first_entry_or_null(&crtc->commit_list,
10105                                 struct drm_crtc_commit, commit_entry);
10106                 if (commit)
10107                         drm_crtc_commit_get(commit);
10108                 spin_unlock(&crtc->commit_lock);
10109
10110                 if (!commit)
10111                         continue;
10112
10113                 /*
10114                  * Make sure all pending HW programming has completed and
10115                  * all page flips are done.
10116                  */
10117                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10118
10119                 if (ret > 0)
10120                         ret = wait_for_completion_interruptible_timeout(
10121                                         &commit->flip_done, 10*HZ);
10122
10123                 if (ret == 0)
10124                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10125                                   crtc->base.id, crtc->name);
10126
10127                 drm_crtc_commit_put(commit);
10128         }
10129
10130         return ret < 0 ? ret : 0;
10131 }
10132
10133 static void get_freesync_config_for_crtc(
10134         struct dm_crtc_state *new_crtc_state,
10135         struct dm_connector_state *new_con_state)
10136 {
10137         struct mod_freesync_config config = {0};
10138         struct amdgpu_dm_connector *aconnector =
10139                         to_amdgpu_dm_connector(new_con_state->base.connector);
10140         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10141         int vrefresh = drm_mode_vrefresh(mode);
10142         bool fs_vid_mode = false;
10143
10144         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10145                                         vrefresh >= aconnector->min_vfreq &&
10146                                         vrefresh <= aconnector->max_vfreq;
10147
10148         if (new_crtc_state->vrr_supported) {
10149                 new_crtc_state->stream->ignore_msa_timing_param = true;
10150                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10151
10152                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10153                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10154                 config.vsif_supported = true;
10155                 config.btr = true;
10156
10157                 if (fs_vid_mode) {
10158                         config.state = VRR_STATE_ACTIVE_FIXED;
10159                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10160                         goto out;
10161                 } else if (new_crtc_state->base.vrr_enabled) {
10162                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10163                 } else {
10164                         config.state = VRR_STATE_INACTIVE;
10165                 }
10166         }
10167 out:
10168         new_crtc_state->freesync_config = config;
10169 }
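
/*
 * Worked example (illustrative): for a panel advertising a 48-144 Hz VRR
 * range, the config above becomes min_refresh_in_uhz = 48,000,000 and
 * max_refresh_in_uhz = 144,000,000 (micro-Hz). A 120 Hz mode with VRR
 * enabled in the CRTC state then selects VRR_STATE_ACTIVE_VARIABLE, while a
 * freesync fixed video mode pins VRR_STATE_ACTIVE_FIXED.
 */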
10170
10171 static void reset_freesync_config_for_crtc(
10172         struct dm_crtc_state *new_crtc_state)
10173 {
10174         new_crtc_state->vrr_supported = false;
10175
10176         memset(&new_crtc_state->vrr_infopacket, 0,
10177                sizeof(new_crtc_state->vrr_infopacket));
10178 }
10179
10180 static bool
10181 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10182                                  struct drm_crtc_state *new_crtc_state)
10183 {
10184         const struct drm_display_mode *old_mode, *new_mode;
10185
10186         if (!old_crtc_state || !new_crtc_state)
10187                 return false;
10188
10189         old_mode = &old_crtc_state->mode;
10190         new_mode = &new_crtc_state->mode;
10191
10192         if (old_mode->clock       == new_mode->clock &&
10193             old_mode->hdisplay    == new_mode->hdisplay &&
10194             old_mode->vdisplay    == new_mode->vdisplay &&
10195             old_mode->htotal      == new_mode->htotal &&
10196             old_mode->vtotal      != new_mode->vtotal &&
10197             old_mode->hsync_start == new_mode->hsync_start &&
10198             old_mode->vsync_start != new_mode->vsync_start &&
10199             old_mode->hsync_end   == new_mode->hsync_end &&
10200             old_mode->vsync_end   != new_mode->vsync_end &&
10201             old_mode->hskew       == new_mode->hskew &&
10202             old_mode->vscan       == new_mode->vscan &&
10203             (old_mode->vsync_end - old_mode->vsync_start) ==
10204             (new_mode->vsync_end - new_mode->vsync_start))
10205                 return true;
10206
10207         return false;
10208 }
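
/*
 * Illustrative example: the check above passes exactly when two modes differ
 * only in the vertical front porch. E.g. a 1920x1080 mode with clock 148500,
 * htotal 2200, vsync pulse width 5 and vtotal 1125 (60 Hz), against the same
 * mode stretched to vtotal 1370 (~49.3 Hz): every horizontal field and the
 * vsync pulse width match, while vtotal/vsync_start/vsync_end differ, so the
 * new timing is reachable by a front porch change alone without a full modeset.
 */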
10209
10210 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10211         uint64_t num, den, res;
10212         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10213
10214         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10215
10216         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10217         den = (unsigned long long)new_crtc_state->mode.htotal *
10218               (unsigned long long)new_crtc_state->mode.vtotal;
10219
10220         res = div_u64(num, den);
10221         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10222 }
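
/*
 * Worked example (illustrative): for a 1920x1080@60 CEA mode with
 * mode.clock = 148500 (kHz), htotal = 2200 and vtotal = 1125:
 * num = 148500 * 1000 * 1000000 and den = 2200 * 1125 = 2475000, so
 * fixed_refresh_in_uhz = num / den = 60,000,000 uHz, i.e. exactly 60 Hz.
 */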
10223
10224 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10225                          struct drm_atomic_state *state,
10226                          struct drm_crtc *crtc,
10227                          struct drm_crtc_state *old_crtc_state,
10228                          struct drm_crtc_state *new_crtc_state,
10229                          bool enable,
10230                          bool *lock_and_validation_needed)
10231 {
10232         struct dm_atomic_state *dm_state = NULL;
10233         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10234         struct dc_stream_state *new_stream;
10235         int ret = 0;
10236
10237         /*
10238          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
10239          * dc_validation_set, and update changed items there.
10240          */
10241         struct amdgpu_crtc *acrtc = NULL;
10242         struct amdgpu_dm_connector *aconnector = NULL;
10243         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10244         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10245
10246         new_stream = NULL;
10247
10248         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10249         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10250         acrtc = to_amdgpu_crtc(crtc);
10251         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10252
10253         /* TODO This hack should go away */
10254         if (aconnector && enable) {
10255                 /* Make sure fake sink is created in plug-in scenario */
10256                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10257                                                             &aconnector->base);
10258                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10259                                                             &aconnector->base);
10260
10261                 if (IS_ERR(drm_new_conn_state)) {
10262                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10263                         goto fail;
10264                 }
10265
10266                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10267                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10268
10269                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10270                         goto skip_modeset;
10271
10272                 new_stream = create_validate_stream_for_sink(aconnector,
10273                                                              &new_crtc_state->mode,
10274                                                              dm_new_conn_state,
10275                                                              dm_old_crtc_state->stream);
10276
10277                 /*
10278                  * We can have no stream on ACTION_SET if a display
10279                  * was disconnected during S3. In this case it is not an
10280                  * error; the OS will be updated after detection and
10281                  * will do the right thing on the next atomic commit.
10282                  */
10283
10284                 if (!new_stream) {
10285                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10286                                         __func__, acrtc->base.base.id);
10287                         ret = -ENOMEM;
10288                         goto fail;
10289                 }
10290
10291                 /*
10292                  * TODO: Check VSDB bits to decide whether this should
10293                  * be enabled or not.
10294                  */
10295                 new_stream->triggered_crtc_reset.enabled =
10296                         dm->force_timing_sync;
10297
10298                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10299
10300                 ret = fill_hdr_info_packet(drm_new_conn_state,
10301                                            &new_stream->hdr_static_metadata);
10302                 if (ret)
10303                         goto fail;
10304
10305                 /*
10306                  * If we already removed the old stream from the context
10307                  * (and set the new stream to NULL) then we can't reuse
10308                  * the old stream even if the stream and scaling are unchanged.
10309                  * We'll hit the BUG_ON and black screen.
10310                  *
10311                  * TODO: Refactor this function to allow this check to work
10312                  * in all conditions.
10313                  */
10314                 if (dm_new_crtc_state->stream &&
10315                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10316                         goto skip_modeset;
10317
10318                 if (dm_new_crtc_state->stream &&
10319                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10320                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10321                         new_crtc_state->mode_changed = false;
10322                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10323                                          new_crtc_state->mode_changed);
10324                 }
10325         }
10326
10327         /* mode_changed flag may get updated above, need to check again */
10328         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10329                 goto skip_modeset;
10330
10331         DRM_DEBUG_ATOMIC(
10332                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10333                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10334                 "connectors_changed:%d\n",
10335                 acrtc->crtc_id,
10336                 new_crtc_state->enable,
10337                 new_crtc_state->active,
10338                 new_crtc_state->planes_changed,
10339                 new_crtc_state->mode_changed,
10340                 new_crtc_state->active_changed,
10341                 new_crtc_state->connectors_changed);
10342
10343         /* Remove stream for any changed/disabled CRTC */
10344         if (!enable) {
10345
10346                 if (!dm_old_crtc_state->stream)
10347                         goto skip_modeset;
10348
10349                 if (dm_new_crtc_state->stream &&
10350                     is_timing_unchanged_for_freesync(new_crtc_state,
10351                                                      old_crtc_state)) {
10352                         new_crtc_state->mode_changed = false;
10353                         DRM_DEBUG_DRIVER(
10354                                 "Mode change not required for front porch change, setting mode_changed to %d\n",
10355                                 new_crtc_state->mode_changed);
10357
10358                         set_freesync_fixed_config(dm_new_crtc_state);
10359
10360                         goto skip_modeset;
10361                 } else if (aconnector &&
10362                            is_freesync_video_mode(&new_crtc_state->mode,
10363                                                   aconnector)) {
10364                         struct drm_display_mode *high_mode;
10365
10366                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10367                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10368                                 set_freesync_fixed_config(dm_new_crtc_state);
10369                         }
10370                 }
10371
10372                 ret = dm_atomic_get_state(state, &dm_state);
10373                 if (ret)
10374                         goto fail;
10375
10376                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10377                                 crtc->base.id);
10378
10379                 /* i.e. reset mode */
10380                 if (dc_remove_stream_from_ctx(
10381                                 dm->dc,
10382                                 dm_state->context,
10383                                 dm_old_crtc_state->stream) != DC_OK) {
10384                         ret = -EINVAL;
10385                         goto fail;
10386                 }
10387
10388                 dc_stream_release(dm_old_crtc_state->stream);
10389                 dm_new_crtc_state->stream = NULL;
10390
10391                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10392
10393                 *lock_and_validation_needed = true;
10394
10395         } else {/* Add stream for any updated/enabled CRTC */
10396                 /*
10397                  * Quick fix to prevent a NULL pointer deref on new_stream when newly
10398                  * added MST connectors are not found in the existing crtc_state in chained mode.
10399                  * TODO: dig out the root cause of this.
10400                  */
10401                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10402                         goto skip_modeset;
10403
10404                 if (modereset_required(new_crtc_state))
10405                         goto skip_modeset;
10406
10407                 if (modeset_required(new_crtc_state, new_stream,
10408                                      dm_old_crtc_state->stream)) {
10409
10410                         WARN_ON(dm_new_crtc_state->stream);
10411
10412                         ret = dm_atomic_get_state(state, &dm_state);
10413                         if (ret)
10414                                 goto fail;
10415
10416                         dm_new_crtc_state->stream = new_stream;
10417
10418                         dc_stream_retain(new_stream);
10419
10420                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10421                                          crtc->base.id);
10422
10423                         if (dc_add_stream_to_ctx(
10424                                         dm->dc,
10425                                         dm_state->context,
10426                                         dm_new_crtc_state->stream) != DC_OK) {
10427                                 ret = -EINVAL;
10428                                 goto fail;
10429                         }
10430
10431                         *lock_and_validation_needed = true;
10432                 }
10433         }
10434
10435 skip_modeset:
10436         /* Release extra reference */
10437         if (new_stream)
10438                 dc_stream_release(new_stream);
10439
10440         /*
10441          * We want to do dc stream updates that do not require a
10442          * full modeset below.
10443          */
10444         if (!(enable && aconnector && new_crtc_state->active))
10445                 return 0;
10446         /*
10447          * Given the above conditions, the dc state cannot be NULL because:
10448          * 1. We're in the process of enabling CRTCs (the stream has just
10449          *    been added to the dc context, or is already in the context),
10450          * 2. the CRTC has a valid connector attached, and
10451          * 3. the CRTC is currently active and enabled.
10452          * => The dc stream state currently exists.
10453          */
10454         BUG_ON(dm_new_crtc_state->stream == NULL);
10455
10456         /* Scaling or underscan settings */
10457         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10458                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10459                 update_stream_scaling_settings(
10460                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10461
10462         /* ABM settings */
10463         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10464
10465         /*
10466          * Color management settings. We also update color properties
10467          * when a modeset is needed, to ensure it gets reprogrammed.
10468          */
10469         if (dm_new_crtc_state->base.color_mgmt_changed ||
10470             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10471                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10472                 if (ret)
10473                         goto fail;
10474         }
10475
10476         /* Update Freesync settings. */
10477         get_freesync_config_for_crtc(dm_new_crtc_state,
10478                                      dm_new_conn_state);
10479
10480         return ret;
10481
10482 fail:
10483         if (new_stream)
10484                 dc_stream_release(new_stream);
10485         return ret;
10486 }
10487
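/*
 * Decide whether a plane change requires tearing down and recreating all
 * DC plane states on its stream. Anything that can affect bandwidth,
 * z-order or pipe acquisition (modesets, color management, scaling,
 * rotation, blending, pixel format, tiling/DCC) forces a reset; a pure
 * positional move of a single plane does not.
 */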
10488 static bool should_reset_plane(struct drm_atomic_state *state,
10489                                struct drm_plane *plane,
10490                                struct drm_plane_state *old_plane_state,
10491                                struct drm_plane_state *new_plane_state)
10492 {
10493         struct drm_plane *other;
10494         struct drm_plane_state *old_other_state, *new_other_state;
10495         struct drm_crtc_state *new_crtc_state;
10496         int i;
10497
10498         /*
10499          * TODO: Remove this hack once the checks below are sufficient
10500          * to determine when we need to reset all the planes on
10501          * the stream.
10502          */
10503         if (state->allow_modeset)
10504                 return true;
10505
10506         /* Exit early if we know that we're adding or removing the plane. */
10507         if (old_plane_state->crtc != new_plane_state->crtc)
10508                 return true;
10509
10510         /* old crtc == new_crtc == NULL, plane not in context. */
10511         if (!new_plane_state->crtc)
10512                 return false;
10513
10514         new_crtc_state =
10515                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10516
10517         if (!new_crtc_state)
10518                 return true;
10519
10520         /* CRTC Degamma changes currently require us to recreate planes. */
10521         if (new_crtc_state->color_mgmt_changed)
10522                 return true;
10523
10524         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10525                 return true;
10526
10527         /*
10528          * If there are any new primary or overlay planes being added or
10529          * removed then the z-order can potentially change. To ensure
10530          * correct z-order and pipe acquisition the current DC architecture
10531          * requires us to remove and recreate all existing planes.
10532          *
10533          * TODO: Come up with a more elegant solution for this.
10534          */
10535         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10536                 struct amdgpu_framebuffer *old_afb, *new_afb;
10537                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10538                         continue;
10539
10540                 if (old_other_state->crtc != new_plane_state->crtc &&
10541                     new_other_state->crtc != new_plane_state->crtc)
10542                         continue;
10543
10544                 if (old_other_state->crtc != new_other_state->crtc)
10545                         return true;
10546
10547                 /* Src/dst size and scaling updates. */
10548                 if (old_other_state->src_w != new_other_state->src_w ||
10549                     old_other_state->src_h != new_other_state->src_h ||
10550                     old_other_state->crtc_w != new_other_state->crtc_w ||
10551                     old_other_state->crtc_h != new_other_state->crtc_h)
10552                         return true;
10553
10554                 /* Rotation / mirroring updates. */
10555                 if (old_other_state->rotation != new_other_state->rotation)
10556                         return true;
10557
10558                 /* Blending updates. */
10559                 if (old_other_state->pixel_blend_mode !=
10560                     new_other_state->pixel_blend_mode)
10561                         return true;
10562
10563                 /* Alpha updates. */
10564                 if (old_other_state->alpha != new_other_state->alpha)
10565                         return true;
10566
10567                 /* Colorspace changes. */
10568                 if (old_other_state->color_range != new_other_state->color_range ||
10569                     old_other_state->color_encoding != new_other_state->color_encoding)
10570                         return true;
10571
10572                 /* Framebuffer checks fall at the end. */
10573                 if (!old_other_state->fb || !new_other_state->fb)
10574                         continue;
10575
10576                 /* Pixel format changes can require bandwidth updates. */
10577                 if (old_other_state->fb->format != new_other_state->fb->format)
10578                         return true;
10579
10580                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10581                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10582
10583                 /* Tiling and DCC changes also require bandwidth updates. */
10584                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10585                     old_afb->base.modifier != new_afb->base.modifier)
10586                         return true;
10587         }
10588
10589         return false;
10590 }
10591
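/*
 * Validate a framebuffer attached to a cursor plane: it must fit within the
 * hardware cursor limits, be presented unscaled and uncropped, use a pitch
 * the cursor plane supports (64/128/256 pixels), and, when no format
 * modifier is supplied, be linear (untiled).
 */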
10592 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10593                               struct drm_plane_state *new_plane_state,
10594                               struct drm_framebuffer *fb)
10595 {
10596         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10597         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10598         unsigned int pitch;
10599         bool linear;
10600
10601         if (fb->width > new_acrtc->max_cursor_width ||
10602             fb->height > new_acrtc->max_cursor_height) {
10603                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10604                                  new_plane_state->fb->width,
10605                                  new_plane_state->fb->height);
10606                 return -EINVAL;
10607         }
10608         if (new_plane_state->src_w != fb->width << 16 ||
10609             new_plane_state->src_h != fb->height << 16) {
10610                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10611                 return -EINVAL;
10612         }
10613
10614         /* Pitch in pixels */
10615         pitch = fb->pitches[0] / fb->format->cpp[0];
10616
10617         if (fb->width != pitch) {
10618                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10619                                  fb->width, pitch);
10620                 return -EINVAL;
10621         }
10622
10623         switch (pitch) {
10624         case 64:
10625         case 128:
10626         case 256:
10627                 /* FB pitch is supported by cursor plane */
10628                 break;
10629         default:
10630                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10631                 return -EINVAL;
10632         }
10633
10634         /* Core DRM takes care of checking FB modifiers, so we only need to
10635          * check tiling flags when the FB doesn't have a modifier. */
10636         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10637                 if (adev->family < AMDGPU_FAMILY_AI) {
10638                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10639                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10640                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10641                 } else {
10642                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10643                 }
10644                 if (!linear) {
10645                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10646                         return -EINVAL;
10647                 }
10648         }
10649
10650         return 0;
10651 }
10652
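/*
 * Update the DC plane state for one DRM plane. atomic_check calls this twice
 * per plane: first with enable == false to remove changed/disabled planes
 * from the DC context, then with enable == true to (re)create and add them.
 * *lock_and_validation_needed is set whenever the change requires full DC
 * validation.
 */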
10653 static int dm_update_plane_state(struct dc *dc,
10654                                  struct drm_atomic_state *state,
10655                                  struct drm_plane *plane,
10656                                  struct drm_plane_state *old_plane_state,
10657                                  struct drm_plane_state *new_plane_state,
10658                                  bool enable,
10659                                  bool *lock_and_validation_needed)
10660 {
10661
10662         struct dm_atomic_state *dm_state = NULL;
10663         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10664         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10665         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10666         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10667         struct amdgpu_crtc *new_acrtc;
10668         bool needs_reset;
10669         int ret = 0;
10670
10671
10672         new_plane_crtc = new_plane_state->crtc;
10673         old_plane_crtc = old_plane_state->crtc;
10674         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10675         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10676
10677         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10678                 if (!enable || !new_plane_crtc ||
10679                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10680                         return 0;
10681
10682                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10683
10684                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10685                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10686                         return -EINVAL;
10687                 }
10688
10689                 if (new_plane_state->fb) {
10690                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10691                                                  new_plane_state->fb);
10692                         if (ret)
10693                                 return ret;
10694                 }
10695
10696                 return 0;
10697         }
10698
10699         needs_reset = should_reset_plane(state, plane, old_plane_state,
10700                                          new_plane_state);
10701
10702         /* Remove any changed/removed planes */
10703         if (!enable) {
10704                 if (!needs_reset)
10705                         return 0;
10706
10707                 if (!old_plane_crtc)
10708                         return 0;
10709
10710                 old_crtc_state = drm_atomic_get_old_crtc_state(
10711                                 state, old_plane_crtc);
10712                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10713
10714                 if (!dm_old_crtc_state->stream)
10715                         return 0;
10716
10717                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10718                                 plane->base.id, old_plane_crtc->base.id);
10719
10720                 ret = dm_atomic_get_state(state, &dm_state);
10721                 if (ret)
10722                         return ret;
10723
10724                 if (!dc_remove_plane_from_context(
10725                                 dc,
10726                                 dm_old_crtc_state->stream,
10727                                 dm_old_plane_state->dc_state,
10728                                 dm_state->context)) {
10729
10730                         return -EINVAL;
10731                 }
10732
10733
10734                 dc_plane_state_release(dm_old_plane_state->dc_state);
10735                 dm_new_plane_state->dc_state = NULL;
10736
10737                 *lock_and_validation_needed = true;
10738
10739         } else { /* Add new planes */
10740                 struct dc_plane_state *dc_new_plane_state;
10741
10742                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10743                         return 0;
10744
10745                 if (!new_plane_crtc)
10746                         return 0;
10747
10748                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10749                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10750
10751                 if (!dm_new_crtc_state->stream)
10752                         return 0;
10753
10754                 if (!needs_reset)
10755                         return 0;
10756
10757                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10758                 if (ret)
10759                         return ret;
10760
10761                 WARN_ON(dm_new_plane_state->dc_state);
10762
10763                 dc_new_plane_state = dc_create_plane_state(dc);
10764                 if (!dc_new_plane_state)
10765                         return -ENOMEM;
10766
10767                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10768                                  plane->base.id, new_plane_crtc->base.id);
10769
10770                 ret = fill_dc_plane_attributes(
10771                         drm_to_adev(new_plane_crtc->dev),
10772                         dc_new_plane_state,
10773                         new_plane_state,
10774                         new_crtc_state);
10775                 if (ret) {
10776                         dc_plane_state_release(dc_new_plane_state);
10777                         return ret;
10778                 }
10779
10780                 ret = dm_atomic_get_state(state, &dm_state);
10781                 if (ret) {
10782                         dc_plane_state_release(dc_new_plane_state);
10783                         return ret;
10784                 }
10785
10786                 /*
10787                  * Any atomic check errors that occur after this will
10788                  * not need a release. The plane state will be attached
10789                  * to the stream, and therefore part of the atomic
10790                  * state. It'll be released when the atomic state is
10791                  * cleaned.
10792                  */
10793                 if (!dc_add_plane_to_context(
10794                                 dc,
10795                                 dm_new_crtc_state->stream,
10796                                 dc_new_plane_state,
10797                                 dm_state->context)) {
10798
10799                         dc_plane_state_release(dc_new_plane_state);
10800                         return -EINVAL;
10801                 }
10802
10803                 dm_new_plane_state->dc_state = dc_new_plane_state;
10804
10805                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10806
10807                 /* Tell DC to do a full surface update every time there
10808                  * is a plane change. Inefficient, but works for now.
10809                  */
10810                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10811
10812                 *lock_and_validation_needed = true;
10813         }
10814
10815
10816         return ret;
10817 }
10818
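/*
 * Return a plane's source size in whole pixels (src_w/src_h are 16.16 fixed
 * point), swapping width and height for 90/270 degree rotations so that
 * callers compare sizes in CRTC orientation.
 */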
10819 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10820                                        int *src_w, int *src_h)
10821 {
10822         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10823         case DRM_MODE_ROTATE_90:
10824         case DRM_MODE_ROTATE_270:
10825                 *src_w = plane_state->src_h >> 16;
10826                 *src_h = plane_state->src_w >> 16;
10827                 break;
10828         case DRM_MODE_ROTATE_0:
10829         case DRM_MODE_ROTATE_180:
10830         default:
10831                 *src_w = plane_state->src_w >> 16;
10832                 *src_h = plane_state->src_h >> 16;
10833                 break;
10834         }
10835 }
10836
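/*
 * Compare the cursor's effective scale (crtc size over rotated source size,
 * in units of 1/1000) against every enabled plane beneath it on the same
 * CRTC, rejecting mismatches; see the hardware-cursor note inside the
 * function for why this restriction exists.
 */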
10837 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10838                                 struct drm_crtc *crtc,
10839                                 struct drm_crtc_state *new_crtc_state)
10840 {
10841         struct drm_plane *cursor = crtc->cursor, *underlying;
10842         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10843         int i;
10844         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10845         int cursor_src_w, cursor_src_h;
10846         int underlying_src_w, underlying_src_h;
10847
10848         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10849          * cursor per pipe but it's going to inherit the scaling and
10850          * positioning from the underlying pipe. Check the cursor plane's
10851          * blending properties match the underlying planes'. */
10852
10853         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10854         if (!new_cursor_state || !new_cursor_state->fb) {
10855                 return 0;
10856         }
10857
10858         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10859         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10860         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10861
10862         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10863                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10864                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10865                         continue;
10866
10867                 /* Ignore disabled planes */
10868                 if (!new_underlying_state->fb)
10869                         continue;
10870
10871                 dm_get_oriented_plane_size(new_underlying_state,
10872                                            &underlying_src_w, &underlying_src_h);
10873                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10874                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10875
10876                 if (cursor_scale_w != underlying_scale_w ||
10877                     cursor_scale_h != underlying_scale_h) {
10878                         drm_dbg_atomic(crtc->dev,
10879                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10880                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10881                         return -EINVAL;
10882                 }
10883
10884                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10885                 if (new_underlying_state->crtc_x <= 0 &&
10886                     new_underlying_state->crtc_y <= 0 &&
10887                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10888                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10889                         break;
10890         }
10891
10892         return 0;
10893 }
10894
10895 #if defined(CONFIG_DRM_AMD_DC_DCN)
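/*
 * A modeset on one CRTC can change how DSC bandwidth is divided on a shared
 * MST link, so find the MST connector driving this CRTC (if any) and pull
 * every CRTC on the same topology into the atomic state for re-validation.
 */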
10896 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10897 {
10898         struct drm_connector *connector;
10899         struct drm_connector_state *conn_state, *old_conn_state;
10900         struct amdgpu_dm_connector *aconnector = NULL;
10901         int i;
10902         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10903                 if (!conn_state->crtc)
10904                         conn_state = old_conn_state;
10905
10906                 if (conn_state->crtc != crtc)
10907                         continue;
10908
10909                 aconnector = to_amdgpu_dm_connector(connector);
10910                 if (!aconnector->port || !aconnector->mst_port)
10911                         aconnector = NULL;
10912                 else
10913                         break;
10914         }
10915
10916         if (!aconnector)
10917                 return 0;
10918
10919         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10920 }
10921 #endif
10922
10923 /**
10924  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10925  * @dev: The DRM device
10926  * @state: The atomic state to commit
10927  *
10928  * Validate that the given atomic state is programmable by DC into hardware.
10929  * This involves constructing a &struct dc_state reflecting the new hardware
10930  * state we wish to commit, then querying DC to see if it is programmable. It's
10931  * important not to modify the existing DC state. Otherwise, atomic_check
10932  * may unexpectedly commit hardware changes.
10933  *
10934  * When validating the DC state, it's important that the right locks are
10935  * acquired. For a full update, which removes/adds/updates streams on one
10936  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10937  * that any such full-update commit will wait for completion of any
10938  * outstanding flip using DRM's synchronization events.
10939  *
10940  * Note that DM adds the affected connectors for all CRTCs in state, even when
10941  * that might not seem necessary. This is because DC stream creation requires
10942  * the DC sink, which is tied to the DRM connector state. Cleaning this up
10943  * should be possible but is non-trivial - a possible TODO item.
10944  *
10945  * Return: 0 on success, negative error code if validation failed.
10946  */
10947 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10948                                   struct drm_atomic_state *state)
10949 {
10950         struct amdgpu_device *adev = drm_to_adev(dev);
10951         struct dm_atomic_state *dm_state = NULL;
10952         struct dc *dc = adev->dm.dc;
10953         struct drm_connector *connector;
10954         struct drm_connector_state *old_con_state, *new_con_state;
10955         struct drm_crtc *crtc;
10956         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10957         struct drm_plane *plane;
10958         struct drm_plane_state *old_plane_state, *new_plane_state;
10959         enum dc_status status;
10960         int ret, i;
10961         bool lock_and_validation_needed = false;
10962         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10963 #if defined(CONFIG_DRM_AMD_DC_DCN)
10964         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10965         struct drm_dp_mst_topology_state *mst_state;
10966         struct drm_dp_mst_topology_mgr *mgr;
10967 #endif
10968
10969         trace_amdgpu_dm_atomic_check_begin(state);
10970
10971         ret = drm_atomic_helper_check_modeset(dev, state);
10972         if (ret) {
10973                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10974                 goto fail;
10975         }
10976
10977         /* Check connector changes */
10978         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10979                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10980                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10981
10982                 /* Skip connectors that are disabled or part of modeset already. */
10983                 if (!old_con_state->crtc && !new_con_state->crtc)
10984                         continue;
10985
10986                 if (!new_con_state->crtc)
10987                         continue;
10988
10989                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10990                 if (IS_ERR(new_crtc_state)) {
10991                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10992                         ret = PTR_ERR(new_crtc_state);
10993                         goto fail;
10994                 }
10995
10996                 if (dm_old_con_state->abm_level !=
10997                     dm_new_con_state->abm_level)
10998                         new_crtc_state->connectors_changed = true;
10999         }
11000
11001 #if defined(CONFIG_DRM_AMD_DC_DCN)
11002         if (dc_resource_is_dsc_encoding_supported(dc)) {
11003                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11004                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11005                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
11006                                 if (ret) {
11007                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11008                                         goto fail;
11009                                 }
11010                         }
11011                 }
11012                 pre_validate_dsc(state, &dm_state, vars);
11013         }
11014 #endif
11015         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11016                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11017
11018                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11019                     !new_crtc_state->color_mgmt_changed &&
11020                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11021                         dm_old_crtc_state->dsc_force_changed == false)
11022                         continue;
11023
11024                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11025                 if (ret) {
11026                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11027                         goto fail;
11028                 }
11029
11030                 if (!new_crtc_state->enable)
11031                         continue;
11032
11033                 ret = drm_atomic_add_affected_connectors(state, crtc);
11034                 if (ret) {
11035                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11036                         goto fail;
11037                 }
11038
11039                 ret = drm_atomic_add_affected_planes(state, crtc);
11040                 if (ret) {
11041                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11042                         goto fail;
11043                 }
11044
11045                 if (dm_old_crtc_state->dsc_force_changed)
11046                         new_crtc_state->mode_changed = true;
11047         }
11048
11049         /*
11050          * Add all primary and overlay planes on the CRTC to the state
11051          * whenever a plane is enabled to maintain correct z-ordering
11052          * and to enable fast surface updates.
11053          */
11054         drm_for_each_crtc(crtc, dev) {
11055                 bool modified = false;
11056
11057                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11058                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11059                                 continue;
11060
11061                         if (new_plane_state->crtc == crtc ||
11062                             old_plane_state->crtc == crtc) {
11063                                 modified = true;
11064                                 break;
11065                         }
11066                 }
11067
11068                 if (!modified)
11069                         continue;
11070
11071                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11072                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11073                                 continue;
11074
11075                         new_plane_state =
11076                                 drm_atomic_get_plane_state(state, plane);
11077
11078                         if (IS_ERR(new_plane_state)) {
11079                                 ret = PTR_ERR(new_plane_state);
11080                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11081                                 goto fail;
11082                         }
11083                 }
11084         }
11085
11086         /* Remove existing planes if they are modified */
11087         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11088                 ret = dm_update_plane_state(dc, state, plane,
11089                                             old_plane_state,
11090                                             new_plane_state,
11091                                             false,
11092                                             &lock_and_validation_needed);
11093                 if (ret) {
11094                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11095                         goto fail;
11096                 }
11097         }
11098
11099         /* Disable all crtcs which require disable */
11100         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11101                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11102                                            old_crtc_state,
11103                                            new_crtc_state,
11104                                            false,
11105                                            &lock_and_validation_needed);
11106                 if (ret) {
11107                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11108                         goto fail;
11109                 }
11110         }
11111
11112         /* Enable all crtcs which require enable */
11113         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11114                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11115                                            old_crtc_state,
11116                                            new_crtc_state,
11117                                            true,
11118                                            &lock_and_validation_needed);
11119                 if (ret) {
11120                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11121                         goto fail;
11122                 }
11123         }
11124
11125         /* Add new/modified planes */
11126         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11127                 ret = dm_update_plane_state(dc, state, plane,
11128                                             old_plane_state,
11129                                             new_plane_state,
11130                                             true,
11131                                             &lock_and_validation_needed);
11132                 if (ret) {
11133                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11134                         goto fail;
11135                 }
11136         }
11137
11138         /* Run this here since we want to validate the streams we created */
11139         ret = drm_atomic_helper_check_planes(dev, state);
11140         if (ret) {
11141                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11142                 goto fail;
11143         }
11144
11145         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11146                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11147                 if (dm_new_crtc_state->mpo_requested)
11148                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11149         }
11150
11151         /* Check cursor planes scaling */
11152         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11153                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11154                 if (ret) {
11155                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11156                         goto fail;
11157                 }
11158         }
11159
11160         if (state->legacy_cursor_update) {
11161                 /*
11162                  * This is a fast cursor update coming from the plane update
11163                  * helper, check if it can be done asynchronously for better
11164                  * performance.
11165                  */
11166                 state->async_update =
11167                         !drm_atomic_helper_async_check(dev, state);
11168
11169                 /*
11170                  * Skip the remaining global validation if this is an async
11171                  * update. Cursor updates can be done without affecting
11172                  * state or bandwidth calcs and this avoids the performance
11173                  * penalty of locking the private state object and
11174                  * allocating a new dc_state.
11175                  */
11176                 if (state->async_update)
11177                         return 0;
11178         }
11179
11180         /* Check scaling and underscan changes */
11181         /* TODO: Scaling-change validation was removed due to the inability
11182          * to commit a new stream into the context w/o causing a full reset.
11183          * Need to decide how to handle this.
11184          */
11185         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11186                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11187                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11188                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11189
11190                 /* Skip any modesets/resets */
11191                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11192                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11193                         continue;
11194
11195                 /* Skip anything that is not a scaling or underscan change */
11196                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11197                         continue;
11198
11199                 lock_and_validation_needed = true;
11200         }
11201
11202 #if defined(CONFIG_DRM_AMD_DC_DCN)
11203         /* set the slot info for each mst_state based on the link encoding format */
11204         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11205                 struct amdgpu_dm_connector *aconnector;
11206                 struct drm_connector *connector;
11207                 struct drm_connector_list_iter iter;
11208                 u8 link_coding_cap;
11209
11210                 if (!mgr->mst_state)
11211                         continue;
11212
11213                 drm_connector_list_iter_begin(dev, &iter);
11214                 drm_for_each_connector_iter(connector, &iter) {
11215                         int id = connector->index;
11216
11217                         if (id == mst_state->mgr->conn_base_id) {
11218                                 aconnector = to_amdgpu_dm_connector(connector);
11219                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11220                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11221
11222                                 break;
11223                         }
11224                 }
11225                 drm_connector_list_iter_end(&iter);
11226
11227         }
11228 #endif
11229         /*
11230          * Streams and planes are reset when there are changes that affect
11231          * bandwidth. Anything that affects bandwidth needs to go through
11232          * DC global validation to ensure that the configuration can be applied
11233          * to hardware.
11234          *
11235          * We currently have to stall out here in atomic_check for outstanding
11236          * commits to finish in this case because our IRQ handlers reference
11237          * DRM state directly - we can end up disabling interrupts too early
11238          * if we don't.
11239          *
11240          * TODO: Remove this stall and drop DM state private objects.
11241          */
11242         if (lock_and_validation_needed) {
11243                 ret = dm_atomic_get_state(state, &dm_state);
11244                 if (ret) {
11245                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11246                         goto fail;
11247                 }
11248
11249                 ret = do_aquire_global_lock(dev, state);
11250                 if (ret) {
11251                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11252                         goto fail;
11253                 }
11254
11255 #if defined(CONFIG_DRM_AMD_DC_DCN)
11256                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11257                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                              ret = -EINVAL;
11258                         goto fail;
11259                 }
11260
11261                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11262                 if (ret) {
11263                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11264                         goto fail;
11265                 }
11266 #endif
11267
11268                 /*
11269                  * Perform validation of MST topology in the state:
11270                  * We need to perform MST atomic check before calling
11271                  * dc_validate_global_state(), or we risk getting stuck in
11272                  * an infinite loop and eventually hanging.
11273                  */
11274                 ret = drm_dp_mst_atomic_check(state);
11275                 if (ret) {
11276                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11277                         goto fail;
11278                 }
11279                 status = dc_validate_global_state(dc, dm_state->context, true);
11280                 if (status != DC_OK) {
11281                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11282                                        dc_status_to_str(status), status);
11283                         ret = -EINVAL;
11284                         goto fail;
11285                 }
11286         } else {
11287                 /*
11288                  * The commit is a fast update. Fast updates shouldn't change
11289                  * the DC context or affect global validation, and they can have
11290                  * their commit work done in parallel with other commits not touching
11291                  * the same resource. If we have a new DC context as part of
11292                  * the DM atomic state from validation we need to free it and
11293                  * retain the existing one instead.
11294                  *
11295                  * Furthermore, since the DM atomic state only contains the DC
11296                  * context and can safely be annulled, we can free the state
11297                  * and clear the associated private object now to free
11298                  * some memory and avoid a possible use-after-free later.
11299                  */
11300
11301                 for (i = 0; i < state->num_private_objs; i++) {
11302                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11303
11304                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11305                                 int j = state->num_private_objs - 1;
11306
11307                                 dm_atomic_destroy_state(obj,
11308                                                 state->private_objs[i].state);
11309
11310                                 /* If i is not at the end of the array then the
11311                                  * last element needs to be moved to where i was
11312                                  * before the array can safely be truncated.
11313                                  */
11314                                 if (i != j)
11315                                         state->private_objs[i] =
11316                                                 state->private_objs[j];
11317
11318                                 state->private_objs[j].ptr = NULL;
11319                                 state->private_objs[j].state = NULL;
11320                                 state->private_objs[j].old_state = NULL;
11321                                 state->private_objs[j].new_state = NULL;
11322
11323                                 state->num_private_objs = j;
11324                                 break;
11325                         }
11326                 }
11327         }
11328
11329         /* Store the overall update type for use later in atomic check. */
11330         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11331                 struct dm_crtc_state *dm_new_crtc_state =
11332                         to_dm_crtc_state(new_crtc_state);
11333
11334                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11335                                                          UPDATE_TYPE_FULL :
11336                                                          UPDATE_TYPE_FAST;
11337         }
11338
11339         /* Must be success */
11340         WARN_ON(ret);
11341
11342         trace_amdgpu_dm_atomic_check_finish(state, ret);
11343
11344         return ret;
11345
11346 fail:
11347         if (ret == -EDEADLK)
11348                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11349         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11350                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11351         else
11352                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11353
11354         trace_amdgpu_dm_atomic_check_finish(state, ret);
11355
11356         return ret;
11357 }
11358
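/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and test the
 * DP_MSA_TIMING_PAR_IGNORED bit: a sink that can ignore the MSA timing
 * parameters is a prerequisite for driving variable timings, so this gates
 * the FreeSync EDID check in amdgpu_dm_update_freesync_caps().
 */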
11359 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11360                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11361 {
11362         uint8_t dpcd_data;
11363         bool capable = false;
11364
11365         if (amdgpu_dm_connector->dc_link &&
11366                 dm_helpers_dp_read_dpcd(
11367                                 NULL,
11368                                 amdgpu_dm_connector->dc_link,
11369                                 DP_DOWN_STREAM_PORT_COUNT,
11370                                 &dpcd_data,
11371                                 sizeof(dpcd_data))) {
11372                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11373         }
11374
11375         return capable;
11376 }
11377
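/*
 * Send one chunk of a CEA extension block to the DMUB firmware for parsing.
 * The firmware acks each chunk; on the final chunk it replies with the
 * parsed AMD VSDB (FreeSync support and min/max refresh rates), which is
 * copied into @vsdb.
 */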
11378 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11379                 unsigned int offset,
11380                 unsigned int total_length,
11381                 uint8_t *data,
11382                 unsigned int length,
11383                 struct amdgpu_hdmi_vsdb_info *vsdb)
11384 {
11385         bool res;
11386         union dmub_rb_cmd cmd;
11387         struct dmub_cmd_send_edid_cea *input;
11388         struct dmub_cmd_edid_cea_output *output;
11389
11390         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11391                 return false;
11392
11393         memset(&cmd, 0, sizeof(cmd));
11394
11395         input = &cmd.edid_cea.data.input;
11396
11397         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11398         cmd.edid_cea.header.sub_type = 0;
11399         cmd.edid_cea.header.payload_bytes =
11400                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11401         input->offset = offset;
11402         input->length = length;
11403         input->cea_total_length = total_length;
11404         memcpy(input->payload, data, length);
11405
11406         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11407         if (!res) {
11408                 DRM_ERROR("EDID CEA parser failed\n");
11409                 return false;
11410         }
11411
11412         output = &cmd.edid_cea.data.output;
11413
11414         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11415                 if (!output->ack.success) {
11416                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11417                                         output->ack.offset);
11418                 }
11419         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11420                 if (!output->amd_vsdb.vsdb_found)
11421                         return false;
11422
11423                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11424                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11425                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11426                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11427         } else {
11428                 DRM_WARN("Unknown EDID CEA parser results\n");
11429                 return false;
11430         }
11431
11432         return true;
11433 }
11434
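/*
 * DMCU variant of the CEA parser: stream the extension block to the DMCU in
 * 8-byte chunks, checking the ack after each chunk, and collect the AMD
 * VSDB result once the whole block has been sent.
 */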
11435 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11436                 uint8_t *edid_ext, int len,
11437                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11438 {
11439         int i;
11440
11441         /* send extension block to DMCU for parsing */
11442         for (i = 0; i < len; i += 8) {
11443                 bool res;
11444                 int offset;
11445
11446                 /* send 8 bytes at a time */
11447                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11448                         return false;
11449
11450                 if (i + 8 == len) {
11451                         /* EDID block send completed, expect result */
11452                         int version, min_rate, max_rate;
11453
11454                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11455                         if (res) {
11456                                 /* amd vsdb found */
11457                                 vsdb_info->freesync_supported = 1;
11458                                 vsdb_info->amd_vsdb_version = version;
11459                                 vsdb_info->min_refresh_rate_hz = min_rate;
11460                                 vsdb_info->max_refresh_rate_hz = max_rate;
11461                                 return true;
11462                         }
11463                         /* not amd vsdb */
11464                         return false;
11465                 }
11466
11467                 /* check for ack */
11468                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11469                 if (!res)
11470                         return false;
11471         }
11472
11473         return false;
11474 }
11475
11476 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11477                 uint8_t *edid_ext, int len,
11478                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11479 {
11480         int i;
11481
11482         /* send extension block to DMUB for parsing */
11483         for (i = 0; i < len; i += 8) {
11484                 /* send 8 bytes at a time */
11485                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11486                         return false;
11487         }
11488
11489         return vsdb_info->freesync_supported;
11490 }
11491
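/* Route CEA parsing to the DMUB firmware when present, else fall back to DMCU. */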
11492 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11493                 uint8_t *edid_ext, int len,
11494                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11495 {
11496         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11497
11498         if (adev->dm.dmub_srv)
11499                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11500         else
11501                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11502 }
11503
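/*
 * Locate the CEA extension block in @edid (mirroring what
 * drm_find_cea_extension() does) and hand it to the firmware parser.
 * Returns the index of the extension when a valid AMD VSDB was found,
 * -ENODEV otherwise.
 */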
11504 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11505                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11506 {
11507         uint8_t *edid_ext = NULL;
11508         int i;
11509         bool valid_vsdb_found = false;
11510
11511         /*----- drm_find_cea_extension() -----*/
11512         /* No EDID or EDID extensions */
11513         if (edid == NULL || edid->extensions == 0)
11514                 return -ENODEV;
11515
11516         /* Find CEA extension */
11517         for (i = 0; i < edid->extensions; i++) {
11518                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11519                 if (edid_ext[0] == CEA_EXT)
11520                         break;
11521         }
11522
11523         if (i == edid->extensions)
11524                 return -ENODEV;
11525
11526         /*----- cea_db_offsets() -----*/
11527         if (edid_ext[0] != CEA_EXT)
11528                 return -ENODEV;
11529
11530         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11531
11532         return valid_vsdb_found ? i : -ENODEV;
11533 }
11534
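/*
 * Derive a connector's FreeSync capability from its EDID. DP/eDP sinks
 * advertise a vertical refresh range through the monitor range descriptor
 * (consulted only when the sink can ignore MSA timing); HDMI sinks use the
 * AMD vendor-specific data block. A usable range wider than 10 Hz marks the
 * connector freesync_capable, and the result is mirrored into the DRM
 * "vrr_capable" property.
 */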
11535 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11536                                         struct edid *edid)
11537 {
11538         int i = 0;
11539         struct detailed_timing *timing;
11540         struct detailed_non_pixel *data;
11541         struct detailed_data_monitor_range *range;
11542         struct amdgpu_dm_connector *amdgpu_dm_connector =
11543                         to_amdgpu_dm_connector(connector);
11544         struct dm_connector_state *dm_con_state = NULL;
11545         struct dc_sink *sink;
11546
11547         struct drm_device *dev = connector->dev;
11548         struct amdgpu_device *adev = drm_to_adev(dev);
11549         bool freesync_capable = false;
11550         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11551
11552         if (!connector->state) {
11553                 DRM_ERROR("%s - Connector has no state\n", __func__);
11554                 goto update;
11555         }
11556
11557         sink = amdgpu_dm_connector->dc_sink ?
11558                 amdgpu_dm_connector->dc_sink :
11559                 amdgpu_dm_connector->dc_em_sink;
11560
11561         if (!edid || !sink) {
11562                 dm_con_state = to_dm_connector_state(connector->state);
11563
11564                 amdgpu_dm_connector->min_vfreq = 0;
11565                 amdgpu_dm_connector->max_vfreq = 0;
11566                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11567                 connector->display_info.monitor_range.min_vfreq = 0;
11568                 connector->display_info.monitor_range.max_vfreq = 0;
11569                 freesync_capable = false;
11570
11571                 goto update;
11572         }
11573
11574         dm_con_state = to_dm_connector_state(connector->state);
11575
11576         if (!adev->dm.freesync_module)
11577                 goto update;
11578
11579
11580         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11581                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11582                 bool edid_check_required = false;
11583
11584                 if (edid) {
11585                         edid_check_required = is_dp_capable_without_timing_msa(
11586                                                 adev->dm.dc,
11587                                                 amdgpu_dm_connector);
11588                 }
11589
11590                 if (edid_check_required && (edid->version > 1 ||
11591                     (edid->version == 1 && edid->revision > 1))) {
11592                         for (i = 0; i < 4; i++) {
11593
11594                                 timing  = &edid->detailed_timings[i];
11595                                 data    = &timing->data.other_data;
11596                                 range   = &data->data.range;
11597                                 /*
11598                                  * Check if monitor has continuous frequency mode
11599                                  */
11600                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11601                                         continue;
11602                                 /*
11603                                  * Check for range-limits-only flag: if flags == 1,
11604                                  * no additional timing information is provided.
11605                                  * Default GTF, GTF secondary curve and CVT are not
11606                                  * supported.
11607                                  */
11608                                 if (range->flags != 1)
11609                                         continue;
11610
11611                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11612                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11613                                 amdgpu_dm_connector->pixel_clock_mhz =
11614                                         range->pixel_clock_mhz * 10;
11615
11616                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11617                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11618
11619                                 break;
11620                         }
11621
11622                         if (amdgpu_dm_connector->max_vfreq -
11623                             amdgpu_dm_connector->min_vfreq > 10) {
11624
11625                                 freesync_capable = true;
11626                         }
11627                 }
11628         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11629                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11630                 if (i >= 0 && vsdb_info.freesync_supported) {
11631                         timing  = &edid->detailed_timings[i];
11632                         data    = &timing->data.other_data;
11633
11634                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11635                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11636                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11637                                 freesync_capable = true;
11638
11639                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11640                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11641                 }
11642         }
11643
11644 update:
11645         if (dm_con_state)
11646                 dm_con_state->freesync_capable = freesync_capable;
11647
11648         if (connector->vrr_capable_property)
11649                 drm_connector_set_vrr_capable_property(connector,
11650                                                        freesync_capable);
11651 }
11652
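      /*
       * Propagate the force_timing_sync setting to every stream in the current
       * DC state, then re-arm per-frame CRTC master sync and trigger the
       * synchronization. Serialized against other DC access via dc_lock.
       */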
11653 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11654 {
11655         struct amdgpu_device *adev = drm_to_adev(dev);
11656         struct dc *dc = adev->dm.dc;
11657         int i;
11658
11659         mutex_lock(&adev->dm.dc_lock);
11660         if (dc->current_state) {
11661                 for (i = 0; i < dc->current_state->stream_count; ++i)
11662                         dc->current_state->streams[i]
11663                                 ->triggered_crtc_reset.enabled =
11664                                 adev->dm.force_timing_sync;
11665
11666                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11667                 dc_trigger_sync(dc, dc->current_state);
11668         }
11669         mutex_unlock(&adev->dm.dc_lock);
11670 }
11671
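      /*
       * Register write helper for DC: routes the write through the CGS layer
       * and records it via the amdgpu_dc_wreg tracepoint. With DM_CHECK_ADDR_0
       * defined, writes to register address 0 are rejected as invalid.
       */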
11672 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11673                        uint32_t value, const char *func_name)
11674 {
11675 #ifdef DM_CHECK_ADDR_0
11676         if (address == 0) {
11677                 DC_ERR("invalid register write. address = 0\n");
11678                 return;
11679         }
11680 #endif
11681         cgs_write_register(ctx->cgs_device, address, value);
11682         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11683 }
11684
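      /*
       * Register read helper for DC. While the DMUB register helper is
       * gathering a command sequence for offload (and is not burst-writing),
       * a synchronous read cannot return a meaningful value, so assert and
       * return 0. Otherwise read through CGS and trace the value.
       */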
11685 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11686                           const char *func_name)
11687 {
11688         uint32_t value;
11689 #ifdef DM_CHECK_ADDR_0
11690         if (address == 0) {
11691                 DC_ERR("invalid register read; address = 0\n");
11692                 return 0;
11693         }
11694 #endif
11695
11696         if (ctx->dmub_srv &&
11697             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11698             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11699                 ASSERT(false);
11700                 return 0;
11701         }
11702
11703         value = cgs_read_register(ctx->cgs_device, address);
11704
11705         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11706
11707         return value;
11708 }
11709
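      /*
       * Translate a DMUB async-to-sync completion status into the caller's
       * return convention: for AUX commands, success yields the reply length;
       * for SET_CONFIG, success yields 0. All failures return -1 and encode
       * the specific error in *operation_result.
       */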
11710 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11711                                                 struct dc_context *ctx,
11712                                                 uint8_t status_type,
11713                                                 uint32_t *operation_result)
11714 {
11715         struct amdgpu_device *adev = ctx->driver_context;
11716         int return_status = -1;
11717         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11718
11719         if (is_cmd_aux) {
11720                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11721                         return_status = p_notify->aux_reply.length;
11722                         *operation_result = p_notify->result;
11723                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11724                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11725                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11726                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11727                 } else {
11728                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11729                 }
11730         } else {
11731                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11732                         return_status = 0;
11733                         *operation_result = p_notify->sc_status;
11734                 } else {
11735                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11736                 }
11737         }
11738
11739         return return_status;
11740 }
11741
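      /*
       * Synchronous wrapper around the asynchronous DMUB AUX / SET_CONFIG
       * paths. The request is fired off and, unless SET_CONFIG completed
       * immediately, the caller blocks (up to 10 seconds) on the
       * dmub_aux_transfer_done completion signalled by the DMUB notification
       * handler. On a successful AUX read, the reply data is copied back into
       * the caller's payload before returning.
       */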
11742 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11743         unsigned int link_index, void *cmd_payload, void *operation_result)
11744 {
11745         struct amdgpu_device *adev = ctx->driver_context;
11746         int ret = 0;
11747
11748         if (is_cmd_aux) {
11749                 dc_process_dmub_aux_transfer_async(ctx->dc,
11750                         link_index, (struct aux_payload *)cmd_payload);
11751         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11752                                         (struct set_config_cmd_payload *)cmd_payload,
11753                                         adev->dm.dmub_notify)) {
11754                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11755                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11756                                         (uint32_t *)operation_result);
11757         }
11758
11759         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11760         if (ret == 0) {
11761                 DRM_ERROR("wait_for_completion_timeout() timed out!");
11762                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11763                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11764                                 (uint32_t *)operation_result);
11765         }
11766
11767         if (is_cmd_aux) {
11768                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11769                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11770
11771                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11772                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11773                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11774                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11775                                        adev->dm.dmub_notify->aux_reply.length);
11776                         }
11777                 }
11778         }
11779
11780         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11781                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11782                         (uint32_t *)operation_result);
11783 }
11784
11785 /*
11786  * Check whether seamless boot is supported.
11787  *
11788  * So far we only support seamless boot on CHIP_VANGOGH.
11789  * If everything goes well, we may consider expanding
11790  * seamless boot to other ASICs.
11791  */
11792 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11793 {
11794         switch (adev->asic_type) {
11795         case CHIP_VANGOGH:
11796                 if (!adev->mman.keep_stolen_vga_memory)
11797                         return true;
11798                 break;
11799         default:
11800                 break;
11801         }
11802
11803         return false;
11804 }
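
      /*
       * Illustrative sketch (an assumption about the caller, not verbatim
       * driver code): the result is expected to be consumed during DC
       * initialization, along the lines of
       *
       *     init_data.flags.allow_seamless_boot_optimization =
       *             check_seamless_boot_capability(adev);
       *
       * so that DC can skip the initial full hardware programming and take
       * over the firmware-lit display seamlessly.
       */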