/* drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

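                /* Pack back into the legacy register layout: the vertical
                 * value sits in the low 16 bits, the horizontal position
                 * (or vblank end) in the high 16 bits. */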
                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

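        /* An OTG instance of -1 means the interrupt source could not be
         * mapped to a CRTC; warn and fall back to the first CRTC rather
         * than indexing the array with a bogus value. */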
        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used to retrieve the CRTC instance that flipped
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int)!e);
}

static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
        struct drm_crtc *crtc = &acrtc->base;
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        drm_crtc_handle_vblank(crtc);

        spin_lock_irqsave(&dev->event_lock, flags);

        /* Send completion event for cursor-only commits */
        if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                drm_crtc_send_vblank_event(crtc, acrtc->event);
                drm_crtc_vblank_put(crtc);
                acrtc->event = NULL;
        }

        spin_unlock_irqrestore(&dev->event_lock, flags);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

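                /* NSEC_PER_SEC divided by the measured frame duration gives
                 * the effective refresh rate in Hz for the tracepoint below. */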
                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will only give valid results
                 * after the front-porch has ended. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        dm_crtc_handle_vblank(acrtc);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only then vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                dm_crtc_handle_vblank(acrtc);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it can be read by the AUX
 * command issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub HPD interrupt processing callback. Looks up the display
 * via the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        uint8_t link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        if (notify->link_index >= adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether the callback processing should be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if @callback is NULL or
 * @type is out of range.
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
                return false;

        adev->dm.dmub_callback[type] = callback;
        adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;

        return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                                                                                      dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and trace log entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
                irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type]) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

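        /* Independently of any notifications, drain the DMCUB trace buffer
         * and re-emit each entry as a host tracepoint, bounded by
         * DMUB_TRACE_MAX_READ entries per interrupt. */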
        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                         entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else
                        break;

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

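        /* max_size is in pixels (htotal * vtotal of the largest reported
         * mode); the buffer below reserves 4 bytes per pixel. */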
        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

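        /* Walk the connectors to find the one backing this audio pin and
         * hand its ELD (EDID-Like Data, the audio capability block derived
         * from the display's EDID) back to the HDA component. */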
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

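        /* The instruction-constant region of the firmware image is wrapped
         * by a PSP header and footer; the header was skipped above, and both
         * are excluded from the copy size so only the payload is copied. */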
        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
                hw_params.dpia_supported = true;
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}

static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

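        /* Note the unit conversions below: system aperture bounds are held
         * in 256 KiB units (>> 18), AGP windows in 16 MiB units (>> 24),
         * and GART page table addresses in 4 KiB pages (>> 12). */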
        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increase system aperture high address (add 1)
                 * to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}

static void vblank_control_worker(struct work_struct *work)
{
        struct vblank_control_work *vblank_work =
                container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        /* Control PSR based on vblank requirements from OS */
        if (vblank_work->stream && vblank_work->stream->link) {
                if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
        }

        mutex_unlock(&dm->dc_lock);

        dc_stream_release(vblank_work->stream);

        kfree(vblank_work);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
        struct hpd_rx_irq_offload_work *offload_work;
        struct amdgpu_dm_connector *aconnector;
        struct dc_link *dc_link;
        struct amdgpu_device *adev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        unsigned long flags;

        offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
        aconnector = offload_work->offload_wq->aconnector;

        if (!aconnector) {
                DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
                goto skip;
        }

        adev = drm_to_adev(aconnector->base.dev);
        dc_link = aconnector->dc_link;

        mutex_lock(&aconnector->hpd_lock);
        if (!dc_link_detect_sink(dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");
        mutex_unlock(&aconnector->hpd_lock);

        if (new_connection_type == dc_connection_none)
                goto skip;

        if (amdgpu_in_reset(adev))
                goto skip;

        mutex_lock(&adev->dm.dc_lock);
        if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
                dc_link_dp_handle_automated_test(dc_link);
        else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                        hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                dc_link_dp_handle_link_loss(dc_link);
                spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
                offload_work->offload_wq->is_handling_link_loss = false;
                spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
        }
        mutex_unlock(&adev->dm.dc_lock);

skip:
        kfree(offload_work);
}
1347
1348 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1349 {
1350         int max_caps = dc->caps.max_links;
1351         int i = 0;
1352         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1353
1354         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1355
1356         if (!hpd_rx_offload_wq)
1357                 return NULL;
1358
	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!\n");
			/* Unwind the workqueues created so far before bailing out */
			while (--i >= 0)
				destroy_workqueue(hpd_rx_offload_wq[i].wq);
			kfree(hpd_rx_offload_wq);
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}
1374
1375 struct amdgpu_stutter_quirk {
1376         u16 chip_vendor;
1377         u16 chip_device;
1378         u16 subsys_vendor;
1379         u16 subsys_device;
1380         u8 revision;
1381 };
1382
1383 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1384         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1385         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1386         { 0, 0, 0, 0, 0 },
1387 };
1388
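/* Returns true if the device matches an entry in amdgpu_stutter_quirk_list
 * above, i.e. memory stutter is known to misbehave on that board. New quirks
 * are added as another { chip_vendor, chip_device, subsys_vendor,
 * subsys_device, revision } entry before the all-zero terminator.
 */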
1389 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1390 {
1391         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1392
1393         while (p && p->chip_device != 0) {
1394                 if (pdev->vendor == p->chip_vendor &&
1395                     pdev->device == p->chip_device &&
1396                     pdev->subsystem_vendor == p->subsys_vendor &&
1397                     pdev->subsystem_device == p->subsys_device &&
1398                     pdev->revision == p->revision) {
1399                         return true;
1400                 }
1401                 ++p;
1402         }
1403         return false;
1404 }
1405
1406 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1407         {
1408                 .matches = {
1409                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1410                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1411                 },
1412         },
1413         {
1414                 .matches = {
1415                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1416                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1417                 },
1418         },
1419         {
1420                 .matches = {
1421                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1422                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1423                 },
1424         },
1425         {}
1426 };
1427
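/* Latch board-specific quirks from the DMI table above into the display
 * manager; currently only the AUX HPD disconnect quirk is handled here.
 */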
1428 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1429 {
1430         const struct dmi_system_id *dmi_id;
1431
1432         dm->aux_hpd_discon_quirk = false;
1433
1434         dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1435         if (dmi_id) {
1436                 dm->aux_hpd_discon_quirk = true;
1437                 DRM_INFO("aux_hpd_discon_quirk attached\n");
1438         }
1439 }
1440
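/* Top-level DM bring-up, called from dm_hw_init(): creates the DC instance
 * and the DMUB service, sets up the helper workqueues and registers the
 * DRM-facing state. On any failure it tears everything down again through
 * amdgpu_dm_fini().
 */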
1441 static int amdgpu_dm_init(struct amdgpu_device *adev)
1442 {
1443         struct dc_init_data init_data;
1444 #ifdef CONFIG_DRM_AMD_DC_HDCP
1445         struct dc_callback_init init_params;
1446 #endif
1447         int r;
1448
1449         adev->dm.ddev = adev_to_drm(adev);
1450         adev->dm.adev = adev;
1451
1452         /* Zero all the fields */
1453         memset(&init_data, 0, sizeof(init_data));
1454 #ifdef CONFIG_DRM_AMD_DC_HDCP
1455         memset(&init_params, 0, sizeof(init_params));
1456 #endif
1457
1458         mutex_init(&adev->dm.dc_lock);
1459         mutex_init(&adev->dm.audio_lock);
1460         spin_lock_init(&adev->dm.vblank_lock);
1461
	if (amdgpu_dm_irq_init(adev)) {
1463                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1464                 goto error;
1465         }
1466
1467         init_data.asic_id.chip_family = adev->family;
1468
1469         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1470         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1471         init_data.asic_id.chip_id = adev->pdev->device;
1472
1473         init_data.asic_id.vram_width = adev->gmc.vram_width;
1474         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1475         init_data.asic_id.atombios_base_address =
1476                 adev->mode_info.atom_context->bios;
1477
1478         init_data.driver = adev;
1479
1480         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1481
1482         if (!adev->dm.cgs_device) {
1483                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1484                 goto error;
1485         }
1486
1487         init_data.cgs_device = adev->dm.cgs_device;
1488
1489         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1490
1491         switch (adev->ip_versions[DCE_HWIP][0]) {
1492         case IP_VERSION(2, 1, 0):
1493                 switch (adev->dm.dmcub_fw_version) {
1494                 case 0: /* development */
1495                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1496                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1497                         init_data.flags.disable_dmcu = false;
1498                         break;
1499                 default:
1500                         init_data.flags.disable_dmcu = true;
1501                 }
1502                 break;
1503         case IP_VERSION(2, 0, 3):
1504                 init_data.flags.disable_dmcu = true;
1505                 break;
1506         default:
1507                 break;
1508         }
1509
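	/* Decide whether scatter/gather (GPU VM) display is supported; on the
	 * APUs below, scanout from system memory is allowed in addition to
	 * VRAM.
	 */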
1510         switch (adev->asic_type) {
1511         case CHIP_CARRIZO:
1512         case CHIP_STONEY:
1513                 init_data.flags.gpu_vm_support = true;
1514                 break;
1515         default:
1516                 switch (adev->ip_versions[DCE_HWIP][0]) {
1517                 case IP_VERSION(1, 0, 0):
1518                 case IP_VERSION(1, 0, 1):
1519                         /* enable S/G on PCO and RV2 */
1520                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1521                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1522                                 init_data.flags.gpu_vm_support = true;
1523                         break;
1524                 case IP_VERSION(2, 1, 0):
1525                 case IP_VERSION(3, 0, 1):
1526                 case IP_VERSION(3, 1, 2):
1527                 case IP_VERSION(3, 1, 3):
1528                 case IP_VERSION(3, 1, 5):
1529                 case IP_VERSION(3, 1, 6):
1530                         init_data.flags.gpu_vm_support = true;
1531                         break;
1532                 default:
1533                         break;
1534                 }
1535                 break;
1536         }
1537
1538         if (init_data.flags.gpu_vm_support)
1539                 adev->mode_info.gpu_vm_support = true;
1540
1541         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1542                 init_data.flags.fbc_support = true;
1543
1544         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1545                 init_data.flags.multi_mon_pp_mclk_switch = true;
1546
1547         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1548                 init_data.flags.disable_fractional_pwm = true;
1549
1550         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1551                 init_data.flags.edp_no_power_sequencing = true;
1552
1553         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1554                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1555         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1556                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1557
1558         init_data.flags.seamless_boot_edp_requested = false;
1559
1560         if (check_seamless_boot_capability(adev)) {
1561                 init_data.flags.seamless_boot_edp_requested = true;
1562                 init_data.flags.allow_seamless_boot_optimization = true;
1563                 DRM_INFO("Seamless boot condition check passed\n");
1564         }
1565
1566         INIT_LIST_HEAD(&adev->dm.da_list);
1567
1568         retrieve_dmi_info(&adev->dm);
1569
1570         /* Display Core create. */
1571         adev->dm.dc = dc_create(&init_data);
1572
1573         if (adev->dm.dc) {
1574                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1575         } else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1577                 goto error;
1578         }
1579
1580         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1581                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1582                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1583         }
1584
1585         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1587         if (dm_should_disable_stutter(adev->pdev))
1588                 adev->dm.dc->debug.disable_stutter = true;
1589
1590         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1591                 adev->dm.dc->debug.disable_stutter = true;
1592
1593         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1594                 adev->dm.dc->debug.disable_dsc = true;
1595                 adev->dm.dc->debug.disable_dsc_edp = true;
1596         }
1597
1598         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1599                 adev->dm.dc->debug.disable_clock_gate = true;
1600
1601         r = dm_dmub_hw_init(adev);
1602         if (r) {
1603                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1604                 goto error;
1605         }
1606
1607         dc_hardware_init(adev->dm.dc);
1608
1609         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1610         if (!adev->dm.hpd_rx_offload_wq) {
1611                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1612                 goto error;
1613         }
1614
1615         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1616                 struct dc_phy_addr_space_config pa_config;
1617
1618                 mmhub_read_system_context(adev, &pa_config);
1619
1620                 // Call the DC init_memory func
1621                 dc_setup_system_context(adev->dm.dc, &pa_config);
1622         }
1623
1624         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
1631
1632         amdgpu_dm_init_color_mod();
1633
1634         if (adev->dm.dc->caps.max_links > 0) {
1635                 adev->dm.vblank_control_workqueue =
1636                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1637                 if (!adev->dm.vblank_control_workqueue)
1638                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1639         }
1640
1641 #ifdef CONFIG_DRM_AMD_DC_HDCP
1642         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1643                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1644
1645                 if (!adev->dm.hdcp_workqueue)
1646                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1647                 else
1648                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1649
1650                 dc_init_callbacks(adev->dm.dc, &init_params);
1651         }
1652 #endif
1653 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1654         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1655 #endif
1656         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1657                 init_completion(&adev->dm.dmub_aux_transfer_done);
1658                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1659                 if (!adev->dm.dmub_notify) {
			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1661                         goto error;
1662                 }
1663
1664                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1665                 if (!adev->dm.delayed_hpd_wq) {
1666                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1667                         goto error;
1668                 }
1669
1670                 amdgpu_dm_outbox_init(adev);
1671                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1672                         dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1674                         goto error;
1675                 }
1676                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1678                         goto error;
1679                 }
1680                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1682                         goto error;
1683                 }
1684         }
1685
1686         if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1689                 goto error;
1690         }
1691
1692         /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1693          * It is expected that DMUB will resend any pending notifications at this point, for
1694          * example HPD from DPIA.
1695          */
1696         if (dc_is_dmub_outbox_supported(adev->dm.dc))
1697                 dc_enable_dmub_outbox(adev->dm.dc);
1698
1699         /* create fake encoders for MST */
1700         dm_dp_create_fake_mst_encoders(adev);
1701
1702         /* TODO: Add_display_info? */
1703
1704         /* TODO use dynamic cursor width */
1705         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1706         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1707
1708         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1711                 goto error;
1712         }
1713
	DRM_DEBUG_DRIVER("KMS initialized.\n");
1716
1717         return 0;
1718 error:
1719         amdgpu_dm_fini(adev);
1720
1721         return -EINVAL;
1722 }
1723
1724 static int amdgpu_dm_early_fini(void *handle)
1725 {
1726         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1727
1728         amdgpu_dm_audio_fini(adev);
1729
1730         return 0;
1731 }
1732
1733 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1734 {
1735         int i;
1736
1737         if (adev->dm.vblank_control_workqueue) {
1738                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1739                 adev->dm.vblank_control_workqueue = NULL;
1740         }
1741
1742         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1743                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1744         }
1745
1746         amdgpu_dm_destroy_drm_device(&adev->dm);
1747
1748 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1749         if (adev->dm.crc_rd_wrk) {
1750                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1751                 kfree(adev->dm.crc_rd_wrk);
1752                 adev->dm.crc_rd_wrk = NULL;
1753         }
1754 #endif
1755 #ifdef CONFIG_DRM_AMD_DC_HDCP
1756         if (adev->dm.hdcp_workqueue) {
1757                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1758                 adev->dm.hdcp_workqueue = NULL;
1759         }
1760
1761         if (adev->dm.dc)
1762                 dc_deinit_callbacks(adev->dm.dc);
1763 #endif
1764
1765         dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1766
1767         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1768                 kfree(adev->dm.dmub_notify);
1769                 adev->dm.dmub_notify = NULL;
1770                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1771                 adev->dm.delayed_hpd_wq = NULL;
1772         }
1773
1774         if (adev->dm.dmub_bo)
1775                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1776                                       &adev->dm.dmub_bo_gpu_addr,
1777                                       &adev->dm.dmub_bo_cpu_addr);
1778
1779         if (adev->dm.hpd_rx_offload_wq) {
1780                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1781                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1782                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1783                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1784                         }
1785                 }
1786
1787                 kfree(adev->dm.hpd_rx_offload_wq);
1788                 adev->dm.hpd_rx_offload_wq = NULL;
1789         }
1790
1791         /* DC Destroy TODO: Replace destroy DAL */
1792         if (adev->dm.dc)
1793                 dc_destroy(&adev->dm.dc);
1794         /*
	 * TODO: pageflip, vblank interrupt
1796          *
1797          * amdgpu_dm_irq_fini(adev);
1798          */
1799
1800         if (adev->dm.cgs_device) {
1801                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1802                 adev->dm.cgs_device = NULL;
1803         }
1804         if (adev->dm.freesync_module) {
1805                 mod_freesync_destroy(adev->dm.freesync_module);
1806                 adev->dm.freesync_module = NULL;
1807         }
1808
1809         mutex_destroy(&adev->dm.audio_lock);
1810         mutex_destroy(&adev->dm.dc_lock);
1813 }
1814
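/* Request and validate DMCU (Display MicroController Unit) firmware for the
 * ASICs that need it and queue it for loading through PSP. ASICs without a
 * DMCU simply return 0; a missing firmware file is likewise not fatal.
 */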
1815 static int load_dmcu_fw(struct amdgpu_device *adev)
1816 {
1817         const char *fw_name_dmcu = NULL;
1818         int r;
1819         const struct dmcu_firmware_header_v1_0 *hdr;
1820
	switch (adev->asic_type) {
1822 #if defined(CONFIG_DRM_AMD_DC_SI)
1823         case CHIP_TAHITI:
1824         case CHIP_PITCAIRN:
1825         case CHIP_VERDE:
1826         case CHIP_OLAND:
1827 #endif
1828         case CHIP_BONAIRE:
1829         case CHIP_HAWAII:
1830         case CHIP_KAVERI:
1831         case CHIP_KABINI:
1832         case CHIP_MULLINS:
1833         case CHIP_TONGA:
1834         case CHIP_FIJI:
1835         case CHIP_CARRIZO:
1836         case CHIP_STONEY:
1837         case CHIP_POLARIS11:
1838         case CHIP_POLARIS10:
1839         case CHIP_POLARIS12:
1840         case CHIP_VEGAM:
1841         case CHIP_VEGA10:
1842         case CHIP_VEGA12:
1843         case CHIP_VEGA20:
1844                 return 0;
1845         case CHIP_NAVI12:
1846                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1847                 break;
1848         case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
1855                 break;
1856         default:
1857                 switch (adev->ip_versions[DCE_HWIP][0]) {
1858                 case IP_VERSION(2, 0, 2):
1859                 case IP_VERSION(2, 0, 3):
1860                 case IP_VERSION(2, 0, 0):
1861                 case IP_VERSION(2, 1, 0):
1862                 case IP_VERSION(3, 0, 0):
1863                 case IP_VERSION(3, 0, 2):
1864                 case IP_VERSION(3, 0, 3):
1865                 case IP_VERSION(3, 0, 1):
1866                 case IP_VERSION(3, 1, 2):
1867                 case IP_VERSION(3, 1, 3):
1868                 case IP_VERSION(3, 1, 5):
1869                 case IP_VERSION(3, 1, 6):
1870                         return 0;
1871                 default:
1872                         break;
1873                 }
1874                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1875                 return -EINVAL;
1876         }
1877
1878         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1879                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1880                 return 0;
1881         }
1882
1883         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1884         if (r == -ENOENT) {
1885                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1886                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1887                 adev->dm.fw_dmcu = NULL;
1888                 return 0;
1889         }
1890         if (r) {
1891                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1892                         fw_name_dmcu);
1893                 return r;
1894         }
1895
1896         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1897         if (r) {
1898                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1899                         fw_name_dmcu);
1900                 release_firmware(adev->dm.fw_dmcu);
1901                 adev->dm.fw_dmcu = NULL;
1902                 return r;
1903         }
1904
1905         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1906         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1907         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1908         adev->firmware.fw_size +=
1909                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1910
1911         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1912         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1913         adev->firmware.fw_size +=
1914                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1915
1916         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1917
1918         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1919
1920         return 0;
1921 }
1922
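/* Register-access thunks handed to the DMUB service so that it can reach
 * registers through DC's dm_read_reg()/dm_write_reg() interface.
 */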
1923 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1924 {
1925         struct amdgpu_device *adev = ctx;
1926
1927         return dm_read_reg(adev->dm.dc->ctx, address);
1928 }
1929
1930 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1931                                      uint32_t value)
1932 {
1933         struct amdgpu_device *adev = ctx;
1934
1935         return dm_write_reg(adev->dm.dc->ctx, address, value);
1936 }
1937
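/* Software-side DMUB setup: pick the firmware matching the DCN variant,
 * create the DMUB service, size its memory regions and back them with a
 * kernel VRAM buffer. The hardware side is brought up later by
 * dm_dmub_hw_init().
 */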
1938 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1939 {
1940         struct dmub_srv_create_params create_params;
1941         struct dmub_srv_region_params region_params;
1942         struct dmub_srv_region_info region_info;
1943         struct dmub_srv_fb_params fb_params;
1944         struct dmub_srv_fb_info *fb_info;
1945         struct dmub_srv *dmub_srv;
1946         const struct dmcub_firmware_header_v1_0 *hdr;
1947         const char *fw_name_dmub;
1948         enum dmub_asic dmub_asic;
1949         enum dmub_status status;
1950         int r;
1951
1952         switch (adev->ip_versions[DCE_HWIP][0]) {
1953         case IP_VERSION(2, 1, 0):
1954                 dmub_asic = DMUB_ASIC_DCN21;
1955                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1956                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1957                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1958                 break;
1959         case IP_VERSION(3, 0, 0):
1960                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1961                         dmub_asic = DMUB_ASIC_DCN30;
1962                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1963                 } else {
1964                         dmub_asic = DMUB_ASIC_DCN30;
1965                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1966                 }
1967                 break;
1968         case IP_VERSION(3, 0, 1):
1969                 dmub_asic = DMUB_ASIC_DCN301;
1970                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1971                 break;
1972         case IP_VERSION(3, 0, 2):
1973                 dmub_asic = DMUB_ASIC_DCN302;
1974                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1975                 break;
1976         case IP_VERSION(3, 0, 3):
1977                 dmub_asic = DMUB_ASIC_DCN303;
1978                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1979                 break;
1980         case IP_VERSION(3, 1, 2):
1981         case IP_VERSION(3, 1, 3):
1982                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1983                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1984                 break;
1985         case IP_VERSION(3, 1, 5):
1986                 dmub_asic = DMUB_ASIC_DCN315;
1987                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1988                 break;
1989         case IP_VERSION(3, 1, 6):
1990                 dmub_asic = DMUB_ASIC_DCN316;
1991                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1992                 break;
1993         default:
1994                 /* ASIC doesn't support DMUB. */
1995                 return 0;
1996         }
1997
1998         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1999         if (r) {
2000                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2001                 return 0;
2002         }
2003
2004         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2005         if (r) {
2006                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2007                 return 0;
2008         }
2009
2010         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2011         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2012
2013         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2014                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2015                         AMDGPU_UCODE_ID_DMCUB;
2016                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2017                         adev->dm.dmub_fw;
2018                 adev->firmware.fw_size +=
2019                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2020
2021                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2022                          adev->dm.dmcub_fw_version);
2023         }
2024
	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2027         dmub_srv = adev->dm.dmub_srv;
2028
2029         if (!dmub_srv) {
2030                 DRM_ERROR("Failed to allocate DMUB service!\n");
2031                 return -ENOMEM;
2032         }
2033
2034         memset(&create_params, 0, sizeof(create_params));
2035         create_params.user_ctx = adev;
2036         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2037         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2038         create_params.asic = dmub_asic;
2039
2040         /* Create the DMUB service. */
2041         status = dmub_srv_create(dmub_srv, &create_params);
2042         if (status != DMUB_STATUS_OK) {
2043                 DRM_ERROR("Error creating DMUB service: %d\n", status);
2044                 return -EINVAL;
2045         }
2046
2047         /* Calculate the size of all the regions for the DMUB service. */
2048         memset(&region_params, 0, sizeof(region_params));
2049
2050         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2051                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2052         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2053         region_params.vbios_size = adev->bios_size;
2054         region_params.fw_bss_data = region_params.bss_data_size ?
2055                 adev->dm.dmub_fw->data +
2056                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2057                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2058         region_params.fw_inst_const =
2059                 adev->dm.dmub_fw->data +
2060                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2061                 PSP_HEADER_BYTES;
2062
2063         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2064                                            &region_info);
2065
2066         if (status != DMUB_STATUS_OK) {
2067                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2068                 return -EINVAL;
2069         }
2070
2071         /*
2072          * Allocate a framebuffer based on the total size of all the regions.
2073          * TODO: Move this into GART.
2074          */
2075         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2076                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2077                                     &adev->dm.dmub_bo_gpu_addr,
2078                                     &adev->dm.dmub_bo_cpu_addr);
2079         if (r)
2080                 return r;
2081
2082         /* Rebase the regions on the framebuffer address. */
2083         memset(&fb_params, 0, sizeof(fb_params));
2084         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2085         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2086         fb_params.region_info = &region_info;
2087
2088         adev->dm.dmub_fb_info =
2089                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2090         fb_info = adev->dm.dmub_fb_info;
2091
2092         if (!fb_info) {
		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
2095                 return -ENOMEM;
2096         }
2097
2098         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2099         if (status != DMUB_STATUS_OK) {
2100                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2101                 return -EINVAL;
2102         }
2103
2104         return 0;
2105 }
2106
2107 static int dm_sw_init(void *handle)
2108 {
2109         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2110         int r;
2111
2112         r = dm_dmub_sw_init(adev);
2113         if (r)
2114                 return r;
2115
2116         return load_dmcu_fw(adev);
2117 }
2118
2119 static int dm_sw_fini(void *handle)
2120 {
2121         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2122
2123         kfree(adev->dm.dmub_fb_info);
2124         adev->dm.dmub_fb_info = NULL;
2125
2126         if (adev->dm.dmub_srv) {
2127                 dmub_srv_destroy(adev->dm.dmub_srv);
2128                 adev->dm.dmub_srv = NULL;
2129         }
2130
2131         release_firmware(adev->dm.dmub_fw);
2132         adev->dm.dmub_fw = NULL;
2133
2134         release_firmware(adev->dm.fw_dmcu);
2135         adev->dm.fw_dmcu = NULL;
2136
2137         return 0;
2138 }
2139
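/* Walk all connectors and start topology management on every MST branch
 * device that already has an AUX channel; if MST cannot be started the link
 * is downgraded to a single-stream (SST) connection.
 */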
2140 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2141 {
2142         struct amdgpu_dm_connector *aconnector;
2143         struct drm_connector *connector;
2144         struct drm_connector_list_iter iter;
2145         int ret = 0;
2146
2147         drm_connector_list_iter_begin(dev, &iter);
2148         drm_for_each_connector_iter(connector, &iter) {
2149                 aconnector = to_amdgpu_dm_connector(connector);
2150                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2151                     aconnector->mst_mgr.aux) {
2152                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2153                                          aconnector,
2154                                          aconnector->base.base.id);
2155
2156                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2157                         if (ret < 0) {
2158                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2159                                 aconnector->dc_link->type =
2160                                         dc_connection_single;
2161                                 break;
2162                         }
2163                 }
2164         }
2165         drm_connector_list_iter_end(&iter);
2166
2167         return ret;
2168 }
2169
2170 static int dm_late_init(void *handle)
2171 {
2172         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2173
2174         struct dmcu_iram_parameters params;
2175         unsigned int linear_lut[16];
2176         int i;
2177         struct dmcu *dmcu = NULL;
2178
2179         dmcu = adev->dm.dc->res_pool->dmcu;
2180
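	/* Identity (linear) backlight transfer function, 16 evenly spaced
	 * points from 0x0000 to 0xFFFF.
	 */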
2181         for (i = 0; i < 16; i++)
2182                 linear_lut[i] = 0xFFFF * i / 15;
2183
2184         params.set = 0;
2185         params.backlight_ramping_override = false;
2186         params.backlight_ramping_start = 0xCCCC;
2187         params.backlight_ramping_reduction = 0xCCCCCCCC;
2188         params.backlight_lut_array_size = 16;
2189         params.backlight_lut_array = linear_lut;
2190
	/* Min backlight level after ABM reduction; don't allow below 1%.
	 * 0xFFFF * 0.01 = 0x28F
	 */
2194         params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on DMCUB,
	 * the DMCU object will be NULL.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
2199         if (dmcu) {
2200                 if (!dmcu_load_iram(dmcu, params))
2201                         return -EINVAL;
2202         } else if (adev->dm.dc->ctx->dmub_srv) {
2203                 struct dc_link *edp_links[MAX_NUM_EDP];
2204                 int edp_num;
2205
2206                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2207                 for (i = 0; i < edp_num; i++) {
2208                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2209                                 return -EINVAL;
2210                 }
2211         }
2212
2213         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2214 }
2215
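/* Suspend/resume helper for MST topologies: quiesce every topology manager
 * on suspend and resume it afterwards; if a topology fails to resume, MST is
 * turned off on it and a hotplug event is generated so userspace re-probes.
 */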
2216 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2217 {
2218         struct amdgpu_dm_connector *aconnector;
2219         struct drm_connector *connector;
2220         struct drm_connector_list_iter iter;
2221         struct drm_dp_mst_topology_mgr *mgr;
2222         int ret;
2223         bool need_hotplug = false;
2224
2225         drm_connector_list_iter_begin(dev, &iter);
2226         drm_for_each_connector_iter(connector, &iter) {
2227                 aconnector = to_amdgpu_dm_connector(connector);
2228                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2229                     aconnector->mst_port)
2230                         continue;
2231
2232                 mgr = &aconnector->mst_mgr;
2233
2234                 if (suspend) {
2235                         drm_dp_mst_topology_mgr_suspend(mgr);
2236                 } else {
2237                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2238                         if (ret < 0) {
2239                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2240                                 need_hotplug = true;
2241                         }
2242                 }
2243         }
2244         drm_connector_list_iter_end(&iter);
2245
2246         if (need_hotplug)
2247                 drm_kms_helper_hotplug_event(dev);
2248 }
2249
2250 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2251 {
2252         int ret = 0;
2253
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of dcn watermarks are fixed; the
	 * settings should be passed to smu during boot up and on resume
	 * from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also
	 * fixed values. dc has implemented a different flow for the Windows
	 * driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not Renoir.
	 */
2284         switch (adev->ip_versions[DCE_HWIP][0]) {
2285         case IP_VERSION(2, 0, 2):
2286         case IP_VERSION(2, 0, 0):
2287                 break;
2288         default:
2289                 return 0;
2290         }
2291
2292         ret = amdgpu_dpm_write_watermarks_table(adev);
2293         if (ret) {
2294                 DRM_ERROR("Failed to update WMTABLE!\n");
2295                 return ret;
2296         }
2297
2298         return 0;
2299 }
2300
2301 /**
2302  * dm_hw_init() - Initialize DC device
2303  * @handle: The base driver device containing the amdgpu_dm device.
2304  *
2305  * Initialize the &struct amdgpu_display_manager device. This involves calling
2306  * the initializers of each DM component, then populating the struct with them.
2307  *
2308  * Although the function implies hardware initialization, both hardware and
2309  * software are initialized here. Splitting them out to their relevant init
2310  * hooks is a future TODO item.
2311  *
2312  * Some notable things that are initialized here:
2313  *
2314  * - Display Core, both software and hardware
2315  * - DC modules that we need (freesync and color management)
2316  * - DRM software states
2317  * - Interrupt sources and handlers
2318  * - Vblank support
2319  * - Debug FS entries, if enabled
2320  */
2321 static int dm_hw_init(void *handle)
2322 {
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* Create DAL display manager */
	r = amdgpu_dm_init(adev);
	if (r)
		return r;
	amdgpu_dm_hpd_init(adev);
2327
2328         return 0;
2329 }
2330
2331 /**
2332  * dm_hw_fini() - Teardown DC device
2333  * @handle: The base driver device containing the amdgpu_dm device.
2334  *
2335  * Teardown components within &struct amdgpu_display_manager that require
2336  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2337  * were loaded. Also flush IRQ workqueues and disable them.
2338  */
2339 static int dm_hw_fini(void *handle)
2340 {
2341         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2342
2343         amdgpu_dm_hpd_fini(adev);
2344
2345         amdgpu_dm_irq_fini(adev);
2346         amdgpu_dm_fini(adev);
2347         return 0;
2348 }
2349
2350
2351 static int dm_enable_vblank(struct drm_crtc *crtc);
2352 static void dm_disable_vblank(struct drm_crtc *crtc);
2353
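/* Around a GPU reset, pageflip and vblank interrupts have to be disabled
 * before the reset and re-armed afterwards for every stream that still has
 * planes; this helper toggles both in one pass over the cached DC state.
 */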
2354 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2355                                  struct dc_state *state, bool enable)
2356 {
2357         enum dc_irq_source irq_source;
2358         struct amdgpu_crtc *acrtc;
2359         int rc = -EBUSY;
2360         int i = 0;
2361
2362         for (i = 0; i < state->stream_count; i++) {
2363                 acrtc = get_crtc_by_otg_inst(
2364                                 adev, state->stream_status[i].primary_otg_inst);
2365
2366                 if (acrtc && state->stream_status[i].plane_count != 0) {
2367                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2368                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2371                         if (rc)
2372                                 DRM_WARN("Failed to %s pflip interrupts\n",
2373                                          enable ? "enable" : "disable");
2374
2375                         if (enable) {
2376                                 rc = dm_enable_vblank(&acrtc->base);
2377                                 if (rc)
2378                                         DRM_WARN("Failed to enable vblank interrupts\n");
2379                         } else {
2380                                 dm_disable_vblank(&acrtc->base);
2381                         }
2382
2383                 }
2384         }
2386 }
2387
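/* Commit an empty configuration: copy the current DC state, strip every
 * plane and stream from the copy, then commit it so the hardware ends up
 * with zero active streams (used on suspend and before GPU reset).
 */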
2388 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2389 {
2390         struct dc_state *context = NULL;
2391         enum dc_status res = DC_ERROR_UNEXPECTED;
2392         int i;
2393         struct dc_stream_state *del_streams[MAX_PIPES];
2394         int del_streams_count = 0;
2395
2396         memset(del_streams, 0, sizeof(del_streams));
2397
2398         context = dc_create_state(dc);
2399         if (context == NULL)
2400                 goto context_alloc_fail;
2401
2402         dc_resource_state_copy_construct_current(dc, context);
2403
2404         /* First remove from context all streams */
2405         for (i = 0; i < context->stream_count; i++) {
2406                 struct dc_stream_state *stream = context->streams[i];
2407
2408                 del_streams[del_streams_count++] = stream;
2409         }
2410
2411         /* Remove all planes for removed streams and then remove the streams */
2412         for (i = 0; i < del_streams_count; i++) {
2413                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2414                         res = DC_FAIL_DETACH_SURFACES;
2415                         goto fail;
2416                 }
2417
2418                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2419                 if (res != DC_OK)
2420                         goto fail;
2421         }
2422
2423         res = dc_commit_state(dc, context);
2424
2425 fail:
2426         dc_release_state(context);
2427
2428 context_alloc_fail:
2429         return res;
2430 }
2431
2432 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2433 {
2434         int i;
2435
2436         if (dm->hpd_rx_offload_wq) {
2437                 for (i = 0; i < dm->dc->caps.max_links; i++)
2438                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2439         }
2440 }
2441
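/* Suspend entry point. In the GPU-reset case the current DC state is cached
 * and dc_lock is deliberately left held; dm_resume() replays the cached
 * state and drops the lock once the reset completes. For ordinary S3 the
 * atomic state is cached and the hardware is put into D3.
 */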
2442 static int dm_suspend(void *handle)
2443 {
2444         struct amdgpu_device *adev = handle;
2445         struct amdgpu_display_manager *dm = &adev->dm;
2446         int ret = 0;
2447
2448         if (amdgpu_in_reset(adev)) {
2449                 mutex_lock(&dm->dc_lock);
2450
2451                 dc_allow_idle_optimizations(adev->dm.dc, false);
2452
2453                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2454
2455                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2456
2457                 amdgpu_dm_commit_zero_streams(dm->dc);
2458
2459                 amdgpu_dm_irq_suspend(adev);
2460
2461                 hpd_rx_irq_work_suspend(dm);
2462
2463                 return ret;
2464         }
2465
2466         WARN_ON(adev->dm.cached_state);
2467         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2468
2469         s3_handle_mst(adev_to_drm(adev), true);
2470
2471         amdgpu_dm_irq_suspend(adev);
2472
2473         hpd_rx_irq_work_suspend(dm);
2474
2475         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2476
2477         return 0;
2478 }
2479
2480 struct amdgpu_dm_connector *
2481 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2482                                              struct drm_crtc *crtc)
2483 {
2484         uint32_t i;
2485         struct drm_connector_state *new_con_state;
2486         struct drm_connector *connector;
2487         struct drm_crtc *crtc_from_state;
2488
2489         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2490                 crtc_from_state = new_con_state->crtc;
2491
2492                 if (crtc_from_state == crtc)
2493                         return to_amdgpu_dm_connector(connector);
2494         }
2495
2496         return NULL;
2497 }
2498
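/* Fake a link detection for connectors that are forced on while no sink is
 * physically present: synthesize a sink of the matching signal type and
 * attempt to read a (possibly emulated) EDID for it.
 */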
2499 static void emulated_link_detect(struct dc_link *link)
2500 {
2501         struct dc_sink_init_data sink_init_data = { 0 };
2502         struct display_sink_capability sink_caps = { 0 };
2503         enum dc_edid_status edid_status;
2504         struct dc_context *dc_ctx = link->ctx;
2505         struct dc_sink *sink = NULL;
2506         struct dc_sink *prev_sink = NULL;
2507
2508         link->type = dc_connection_none;
2509         prev_sink = link->local_sink;
2510
2511         if (prev_sink)
2512                 dc_sink_release(prev_sink);
2513
2514         switch (link->connector_signal) {
2515         case SIGNAL_TYPE_HDMI_TYPE_A: {
2516                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2517                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2518                 break;
2519         }
2520
2521         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2522                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2523                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2524                 break;
2525         }
2526
2527         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2528                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2529                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2530                 break;
2531         }
2532
2533         case SIGNAL_TYPE_LVDS: {
2534                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2535                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2536                 break;
2537         }
2538
2539         case SIGNAL_TYPE_EDP: {
2540                 sink_caps.transaction_type =
2541                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2542                 sink_caps.signal = SIGNAL_TYPE_EDP;
2543                 break;
2544         }
2545
2546         case SIGNAL_TYPE_DISPLAY_PORT: {
2547                 sink_caps.transaction_type =
2548                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
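		/* The link is only emulated here, so report the sink signal
		 * as virtual rather than SIGNAL_TYPE_DISPLAY_PORT.
		 */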
2549                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2550                 break;
2551         }
2552
2553         default:
2554                 DC_ERROR("Invalid connector type! signal:%d\n",
2555                         link->connector_signal);
2556                 return;
2557         }
2558
2559         sink_init_data.link = link;
2560         sink_init_data.sink_signal = sink_caps.signal;
2561
2562         sink = dc_sink_create(&sink_init_data);
2563         if (!sink) {
2564                 DC_ERROR("Failed to create sink!\n");
2565                 return;
2566         }
2567
2568         /* dc_sink_create returns a new reference */
2569         link->local_sink = sink;
2570
2571         edid_status = dm_helpers_read_local_edid(
2572                         link->ctx,
2573                         link,
2574                         sink);
2575
2576         if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
2578
2579 }
2580
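/* Replay the cached DC state after a GPU reset: force a full update on every
 * surface of every stream and commit the updates so the hardware is fully
 * reprogrammed.
 */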
2581 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2582                                      struct amdgpu_display_manager *dm)
2583 {
2584         struct {
2585                 struct dc_surface_update surface_updates[MAX_SURFACES];
2586                 struct dc_plane_info plane_infos[MAX_SURFACES];
2587                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2588                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2589                 struct dc_stream_update stream_update;
	} *bundle;
2591         int k, m;
2592
2593         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2594
2595         if (!bundle) {
2596                 dm_error("Failed to allocate update bundle\n");
2597                 goto cleanup;
2598         }
2599
2600         for (k = 0; k < dc_state->stream_count; k++) {
2601                 bundle->stream_update.stream = dc_state->streams[k];
2602
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
2613         }
2614
2615 cleanup:
2616         kfree(bundle);
2619 }
2620
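/* Force DPMS off by committing a dpms_off stream update against whichever
 * stream is currently driving the given link.
 */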
2621 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2622 {
2623         struct dc_stream_state *stream_state;
2624         struct amdgpu_dm_connector *aconnector = link->priv;
2625         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2626         struct dc_stream_update stream_update;
2627         bool dpms_off = true;
2628
2629         memset(&stream_update, 0, sizeof(stream_update));
2630         stream_update.dpms_off = &dpms_off;
2631
2632         mutex_lock(&adev->dm.dc_lock);
2633         stream_state = dc_stream_find_from_link(link);
2634
2635         if (stream_state == NULL) {
2636                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2637                 mutex_unlock(&adev->dm.dc_lock);
2638                 return;
2639         }
2640
2641         stream_update.stream = stream_state;
2642         acrtc_state->force_dpms_off = true;
2643         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2644                                      stream_state, &stream_update,
2645                                      stream_state->ctx->dc->current_state);
2646         mutex_unlock(&adev->dm.dc_lock);
2647 }
2648
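/* Resume counterpart of dm_suspend(): after a GPU reset it replays the
 * cached DC state (and releases dc_lock taken in dm_suspend()); after S3 it
 * rebuilds the DC state, re-detects every connector and restores the cached
 * atomic state.
 */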
2649 static int dm_resume(void *handle)
2650 {
2651         struct amdgpu_device *adev = handle;
2652         struct drm_device *ddev = adev_to_drm(adev);
2653         struct amdgpu_display_manager *dm = &adev->dm;
2654         struct amdgpu_dm_connector *aconnector;
2655         struct drm_connector *connector;
2656         struct drm_connector_list_iter iter;
2657         struct drm_crtc *crtc;
2658         struct drm_crtc_state *new_crtc_state;
2659         struct dm_crtc_state *dm_new_crtc_state;
2660         struct drm_plane *plane;
2661         struct drm_plane_state *new_plane_state;
2662         struct dm_plane_state *dm_new_plane_state;
2663         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2664         enum dc_connection_type new_connection_type = dc_connection_none;
2665         struct dc_state *dc_state;
2666         int i, r, j;
2667
2668         if (amdgpu_in_reset(adev)) {
2669                 dc_state = dm->cached_dc_state;
2670
2671                 /*
2672                  * The dc->current_state is backed up into dm->cached_dc_state
2673                  * before we commit 0 streams.
2674                  *
2675                  * DC will clear link encoder assignments on the real state
2676                  * but the changes won't propagate over to the copy we made
2677                  * before the 0 streams commit.
2678                  *
2679                  * DC expects that link encoder assignments are *not* valid
2680                  * when committing a state, so as a workaround we can copy
2681                  * off of the current state.
2682                  *
2683                  * We lose the previous assignments, but we had already
		 * committed 0 streams anyway.
2685                  */
2686                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2687
2688                 r = dm_dmub_hw_init(adev);
2689                 if (r)
2690                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2691
2692                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2693                 dc_resume(dm->dc);
2694
2695                 amdgpu_dm_irq_resume_early(adev);
2696
2697                 for (i = 0; i < dc_state->stream_count; i++) {
2698                         dc_state->streams[i]->mode_changed = true;
2699                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2700                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2701                                         = 0xffffffff;
2702                         }
2703                 }
2704
2705                 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2706                         amdgpu_dm_outbox_init(adev);
2707                         dc_enable_dmub_outbox(adev->dm.dc);
2708                 }
2709
2710                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2711
2712                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2713
2714                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2715
2716                 dc_release_state(dm->cached_dc_state);
2717                 dm->cached_dc_state = NULL;
2718
2719                 amdgpu_dm_irq_resume_late(adev);
2720
2721                 mutex_unlock(&dm->dc_lock);
2722
2723                 return 0;
2724         }
2725         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2726         dc_release_state(dm_state->context);
2727         dm_state->context = dc_create_state(dm->dc);
2728         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2729         dc_resource_state_construct(dm->dc, dm_state->context);
2730
2731         /* Before powering on DC we need to re-initialize DMUB. */
2732         dm_dmub_hw_resume(adev);
2733
2734         /* Re-enable outbox interrupts for DPIA. */
2735         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2736                 amdgpu_dm_outbox_init(adev);
2737                 dc_enable_dmub_outbox(adev->dm.dc);
2738         }
2739
2740         /* power on hardware */
2741         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2742
2743         /* program HPD filter */
2744         dc_resume(dm->dc);
2745
2746         /*
2747          * early enable HPD Rx IRQ, should be done before set mode as short
2748          * pulse interrupts are used for MST
2749          */
2750         amdgpu_dm_irq_resume_early(adev);
2751
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2753         s3_handle_mst(ddev, false);
2754
	/* Do detection */
2756         drm_connector_list_iter_begin(ddev, &iter);
2757         drm_for_each_connector_iter(connector, &iter) {
2758                 aconnector = to_amdgpu_dm_connector(connector);
2759
2760                 /*
2761                  * this is the case when traversing through already created
2762                  * MST connectors, should be skipped
2763                  */
2764                 if (aconnector->dc_link &&
2765                     aconnector->dc_link->type == dc_connection_mst_branch)
2766                         continue;
2767
2768                 mutex_lock(&aconnector->hpd_lock);
2769                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2770                         DRM_ERROR("KMS: Failed to detect connector\n");
2771
2772                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2773                         emulated_link_detect(aconnector->dc_link);
2774                 else
2775                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2776
2777                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2778                         aconnector->fake_enable = false;
2779
2780                 if (aconnector->dc_sink)
2781                         dc_sink_release(aconnector->dc_sink);
2782                 aconnector->dc_sink = NULL;
2783                 amdgpu_dm_update_connector_after_detect(aconnector);
2784                 mutex_unlock(&aconnector->hpd_lock);
2785         }
2786         drm_connector_list_iter_end(&iter);
2787
2788         /* Force mode set in atomic commit */
2789         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2790                 new_crtc_state->active_changed = true;
2791
2792         /*
2793          * atomic_check is expected to create the dc states. We need to release
2794          * them here, since they were duplicated as part of the suspend
2795          * procedure.
2796          */
2797         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2798                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2799                 if (dm_new_crtc_state->stream) {
2800                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2801                         dc_stream_release(dm_new_crtc_state->stream);
2802                         dm_new_crtc_state->stream = NULL;
2803                 }
2804         }
2805
2806         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2807                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2808                 if (dm_new_plane_state->dc_state) {
2809                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2810                         dc_plane_state_release(dm_new_plane_state->dc_state);
2811                         dm_new_plane_state->dc_state = NULL;
2812                 }
2813         }
2814
2815         drm_atomic_helper_resume(ddev, dm->cached_state);
2816
2817         dm->cached_state = NULL;
2818
2819         amdgpu_dm_irq_resume_late(adev);
2820
2821         amdgpu_dm_smu_write_watermarks_table(adev);
2822
2823         return 0;
2824 }
2825
2826 /**
2827  * DOC: DM Lifecycle
2828  *
2829  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2830  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2831  * the base driver's device list to be initialized and torn down accordingly.
2832  *
2833  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2834  */
2835
2836 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2837         .name = "dm",
2838         .early_init = dm_early_init,
2839         .late_init = dm_late_init,
2840         .sw_init = dm_sw_init,
2841         .sw_fini = dm_sw_fini,
2842         .early_fini = amdgpu_dm_early_fini,
2843         .hw_init = dm_hw_init,
2844         .hw_fini = dm_hw_fini,
2845         .suspend = dm_suspend,
2846         .resume = dm_resume,
2847         .is_idle = dm_is_idle,
2848         .wait_for_idle = dm_wait_for_idle,
2849         .check_soft_reset = dm_check_soft_reset,
2850         .soft_reset = dm_soft_reset,
2851         .set_clockgating_state = dm_set_clockgating_state,
2852         .set_powergating_state = dm_set_powergating_state,
2853 };
2854
2855 const struct amdgpu_ip_block_version dm_ip_block = {
2857         .type = AMD_IP_BLOCK_TYPE_DCE,
2858         .major = 1,
2859         .minor = 0,
2860         .rev = 0,
2861         .funcs = &amdgpu_dm_funcs,
2862 };
2863
2865 /**
2866  * DOC: atomic
2867  *
2868  * *WIP*
2869  */
2870
2871 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2872         .fb_create = amdgpu_display_user_framebuffer_create,
2873         .get_format_info = amd_get_format_info,
2874         .output_poll_changed = drm_fb_helper_output_poll_changed,
2875         .atomic_check = amdgpu_dm_atomic_check,
2876         .atomic_commit = drm_atomic_helper_commit,
2877 };
2878
2879 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2880         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2881 };
2882
2883 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2884 {
2885         u32 max_avg, min_cll, max, min, q, r;
2886         struct amdgpu_dm_backlight_caps *caps;
2887         struct amdgpu_display_manager *dm;
2888         struct drm_connector *conn_base;
2889         struct amdgpu_device *adev;
2890         struct dc_link *link = NULL;
2891         static const u8 pre_computed_values[] = {
2892                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2893                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2894         int i;
2895
2896         if (!aconnector || !aconnector->dc_link)
2897                 return;
2898
2899         link = aconnector->dc_link;
2900         if (link->connector_signal != SIGNAL_TYPE_EDP)
2901                 return;
2902
2903         conn_base = &aconnector->base;
2904         adev = drm_to_adev(conn_base->dev);
2905         dm = &adev->dm;
2906         for (i = 0; i < dm->num_of_edps; i++) {
2907                 if (link == dm->backlight_link[i])
2908                         break;
2909         }
2910         if (i >= dm->num_of_edps)
2911                 return;
2912         caps = &dm->backlight_caps[i];
2913         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2914         caps->aux_support = false;
2915         max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2916         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2917
2918         if (caps->ext_caps->bits.oled == 1 /*||
2919             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2920             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2921                 caps->aux_support = true;
2922
2923         if (amdgpu_backlight == 0)
2924                 caps->aux_support = false;
2925         else if (amdgpu_backlight == 1)
2926                 caps->aux_support = true;
2927
2928         /* From the specification (CTA-861-G), for calculating the maximum
2929          * luminance we need to use:
2930          *      Luminance = 50*2**(CV/32)
2931          * where CV is a one-byte value.
2932          * Evaluating this expression directly would require floating-point
2933          * precision; to avoid that complexity, we take advantage of the fact
2934          * that CV is divided by a constant. From Euclid's division algorithm,
2935          * we know that CV can be written as CV = 32*q + r. Substituting this
2936          * into the luminance expression gives 50*(2**q)*(2**(r/32)), so we
2937          * only need the pre-computed values of 50*2**(r/32). They were
2938          * generated with the following Ruby line:
2939          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2940          * The results can be verified against the entries of
2941          * pre_computed_values.
2942          */
2943         q = max_avg >> 5;
2944         r = max_avg % 32;
2945         max = (1 << q) * pre_computed_values[r];
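        /*
         * Worked example: for CV = 70, q = 70 >> 5 = 2 and r = 70 % 32 = 6,
         * so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which
         * matches round(50*2**(70/32)) = 228.
         */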
2946
2947         // min luminance: maxLum * (CV/255)^2 / 100, dividing last so the
2948         // intermediate (CV/255)^2 term is not truncated to zero
2949         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2950
2951         caps->aux_max_input_signal = max;
2952         caps->aux_min_input_signal = min;
2953 }
2954
2955 void amdgpu_dm_update_connector_after_detect(
2956                 struct amdgpu_dm_connector *aconnector)
2957 {
2958         struct drm_connector *connector = &aconnector->base;
2959         struct drm_device *dev = connector->dev;
2960         struct dc_sink *sink;
2961
2962         /* MST handled by drm_mst framework */
2963         if (aconnector->mst_mgr.mst_state)
2964                 return;
2965
2966         sink = aconnector->dc_link->local_sink;
2967         if (sink)
2968                 dc_sink_retain(sink);
2969
2970         /*
2971          * An EDID-managed connector gets its first update only in the mode_valid hook;
2972          * the connector sink is then set to a fake or physical sink, depending on link status.
2973          * Skip if already done during boot.
2974          */
2975         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2976                         && aconnector->dc_em_sink) {
2977
2978                 /*
2979                  * For headless S3 resume, use the emulated sink to fake the stream,
2980                  * because the connector sink is set to NULL on resume.
2981                  */
2982                 mutex_lock(&dev->mode_config.mutex);
2983
2984                 if (sink) {
2985                         if (aconnector->dc_sink) {
2986                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2987                                 /*
2988                                  * The retain and release below bump the sink's
2989                                  * refcount, because the link no longer points to it
2990                                  * after disconnect; otherwise the next CRTC-to-connector
2991                                  * reshuffle by the UMD would trigger an unwanted dc_sink release.
2992                                  */
2993                                 dc_sink_release(aconnector->dc_sink);
2994                         }
2995                         aconnector->dc_sink = sink;
2996                         dc_sink_retain(aconnector->dc_sink);
2997                         amdgpu_dm_update_freesync_caps(connector,
2998                                         aconnector->edid);
2999                 } else {
3000                         amdgpu_dm_update_freesync_caps(connector, NULL);
3001                         if (!aconnector->dc_sink) {
3002                                 aconnector->dc_sink = aconnector->dc_em_sink;
3003                                 dc_sink_retain(aconnector->dc_sink);
3004                         }
3005                 }
3006
3007                 mutex_unlock(&dev->mode_config.mutex);
3008
3009                 if (sink)
3010                         dc_sink_release(sink);
3011                 return;
3012         }
3013
3014         /*
3015          * TODO: temporary guard until a proper fix is found.
3016          * If this sink is an MST sink, we should not do anything.
3017          */
3018         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3019                 dc_sink_release(sink);
3020                 return;
3021         }
3022
3023         if (aconnector->dc_sink == sink) {
3024                 /*
3025                  * We got a DP short pulse (link loss, DP CTS, etc.);
3026                  * do nothing.
3027                  */
3028                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3029                                 aconnector->connector_id);
3030                 if (sink)
3031                         dc_sink_release(sink);
3032                 return;
3033         }
3034
3035         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3036                 aconnector->connector_id, aconnector->dc_sink, sink);
3037
3038         mutex_lock(&dev->mode_config.mutex);
3039
3040         /*
3041          * 1. Update status of the drm connector
3042          * 2. Send an event and let userspace tell us what to do
3043          */
3044         if (sink) {
3045                 /*
3046                  * TODO: check if we still need the S3 mode update workaround.
3047                  * If yes, put it here.
3048                  */
3049                 if (aconnector->dc_sink) {
3050                         amdgpu_dm_update_freesync_caps(connector, NULL);
3051                         dc_sink_release(aconnector->dc_sink);
3052                 }
3053
3054                 aconnector->dc_sink = sink;
3055                 dc_sink_retain(aconnector->dc_sink);
3056                 if (sink->dc_edid.length == 0) {
3057                         aconnector->edid = NULL;
3058                         if (aconnector->dc_link->aux_mode) {
3059                                 drm_dp_cec_unset_edid(
3060                                         &aconnector->dm_dp_aux.aux);
3061                         }
3062                 } else {
3063                         aconnector->edid =
3064                                 (struct edid *)sink->dc_edid.raw_edid;
3065
3066                         if (aconnector->dc_link->aux_mode)
3067                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3068                                                     aconnector->edid);
3069                 }
3070
3071                 drm_connector_update_edid_property(connector, aconnector->edid);
3072                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3073                 update_connector_ext_caps(aconnector);
3074         } else {
3075                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3076                 amdgpu_dm_update_freesync_caps(connector, NULL);
3077                 drm_connector_update_edid_property(connector, NULL);
3078                 aconnector->num_modes = 0;
3079                 dc_sink_release(aconnector->dc_sink);
3080                 aconnector->dc_sink = NULL;
3081                 aconnector->edid = NULL;
3082 #ifdef CONFIG_DRM_AMD_DC_HDCP
3083                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3084                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3085                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3086 #endif
3087         }
3088
3089         mutex_unlock(&dev->mode_config.mutex);
3090
3091         update_subconnector_property(aconnector);
3092
3093         if (sink)
3094                 dc_sink_release(sink);
3095 }
3096
3097 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3098 {
3099         struct drm_connector *connector = &aconnector->base;
3100         struct drm_device *dev = connector->dev;
3101         enum dc_connection_type new_connection_type = dc_connection_none;
3102         struct amdgpu_device *adev = drm_to_adev(dev);
3103         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3104         struct dm_crtc_state *dm_crtc_state = NULL;
3105
3106         if (adev->dm.disable_hpd_irq)
3107                 return;
3108
3109         if (dm_con_state->base.state && dm_con_state->base.crtc)
3110                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3111                                         dm_con_state->base.state,
3112                                         dm_con_state->base.crtc));
3113         /*
3114          * In case of failure, or for MST, there is no need to update the connector
3115          * status or notify the OS, since MST handles this in its own context.
3116          */
3117         mutex_lock(&aconnector->hpd_lock);
3118
3119 #ifdef CONFIG_DRM_AMD_DC_HDCP
3120         if (adev->dm.hdcp_workqueue) {
3121                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3122                 dm_con_state->update_hdcp = true;
3123         }
3124 #endif
3125         if (aconnector->fake_enable)
3126                 aconnector->fake_enable = false;
3127
3128         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3129                 DRM_ERROR("KMS: Failed to detect connector\n");
3130
3131         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3132                 emulated_link_detect(aconnector->dc_link);
3133
3134                 drm_modeset_lock_all(dev);
3135                 dm_restore_drm_connector_state(dev, connector);
3136                 drm_modeset_unlock_all(dev);
3137
3138                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3139                         drm_kms_helper_connector_hotplug_event(connector);
3140
3141         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3142                 if (new_connection_type == dc_connection_none &&
3143                     aconnector->dc_link->type == dc_connection_none &&
3144                     dm_crtc_state)
3145                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3146
3147                 amdgpu_dm_update_connector_after_detect(aconnector);
3148
3149                 drm_modeset_lock_all(dev);
3150                 dm_restore_drm_connector_state(dev, connector);
3151                 drm_modeset_unlock_all(dev);
3152
3153                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3154                         drm_kms_helper_connector_hotplug_event(connector);
3155         }
3156         mutex_unlock(&aconnector->hpd_lock);
3158 }
3159
3160 static void handle_hpd_irq(void *param)
3161 {
3162         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3163
3164         handle_hpd_irq_helper(aconnector);
3166 }
3167
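/*
 * Poll the DPCD ESI (or sink-count) range, hand any pending IRQ to the MST
 * manager, ACK it back to the sink, and re-read until no new IRQ is
 * reported or max_process_count is reached.
 */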
3168 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3169 {
3170         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3171         uint8_t dret;
3172         bool new_irq_handled = false;
3173         int dpcd_addr;
3174         int dpcd_bytes_to_read;
3175
3176         const int max_process_count = 30;
3177         int process_count = 0;
3178
3179         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3180
3181         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3182                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3183                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3184                 dpcd_addr = DP_SINK_COUNT;
3185         } else {
3186                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3187                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3188                 dpcd_addr = DP_SINK_COUNT_ESI;
3189         }
3190
3191         dret = drm_dp_dpcd_read(
3192                 &aconnector->dm_dp_aux.aux,
3193                 dpcd_addr,
3194                 esi,
3195                 dpcd_bytes_to_read);
3196
3197         while (dret == dpcd_bytes_to_read &&
3198                 process_count < max_process_count) {
3199                 uint8_t retry;
3200                 dret = 0;
3201
3202                 process_count++;
3203
3204                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3205                 /* handle HPD short pulse irq */
3206                 if (aconnector->mst_mgr.mst_state)
3207                         drm_dp_mst_hpd_irq(
3208                                 &aconnector->mst_mgr,
3209                                 esi,
3210                                 &new_irq_handled);
3211
3212                 if (new_irq_handled) {
3213                         /* ACK at DPCD to notify downstream */
3214                         const int ack_dpcd_bytes_to_write =
3215                                 dpcd_bytes_to_read - 1;
3216
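                        /*
                         * The ACK is written starting at dpcd_addr + 1,
                         * skipping the sink-count byte, hence one byte
                         * fewer than was read.
                         */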
3217                         for (retry = 0; retry < 3; retry++) {
3218                                 uint8_t wret;
3219
3220                                 wret = drm_dp_dpcd_write(
3221                                         &aconnector->dm_dp_aux.aux,
3222                                         dpcd_addr + 1,
3223                                         &esi[1],
3224                                         ack_dpcd_bytes_to_write);
3225                                 if (wret == ack_dpcd_bytes_to_write)
3226                                         break;
3227                         }
3228
3229                         /* check if there is a new IRQ to be handled */
3230                         dret = drm_dp_dpcd_read(
3231                                 &aconnector->dm_dp_aux.aux,
3232                                 dpcd_addr,
3233                                 esi,
3234                                 dpcd_bytes_to_read);
3235
3236                         new_irq_handled = false;
3237                 } else {
3238                         break;
3239                 }
3240         }
3241
3242         if (process_count == max_process_count)
3243                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3244 }
3245
3246 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3247                                                         union hpd_irq_data hpd_irq_data)
3248 {
3249         struct hpd_rx_irq_offload_work *offload_work =
3250                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3251
3252         if (!offload_work) {
3253                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3254                 return;
3255         }
3256
3257         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3258         offload_work->data = hpd_irq_data;
3259         offload_work->offload_wq = offload_wq;
3260
3261         queue_work(offload_wq->wq, &offload_work->work);
3262         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3263 }
3264
3265 static void handle_hpd_rx_irq(void *param)
3266 {
3267         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3268         struct drm_connector *connector = &aconnector->base;
3269         struct drm_device *dev = connector->dev;
3270         struct dc_link *dc_link = aconnector->dc_link;
3271         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3272         bool result = false;
3273         enum dc_connection_type new_connection_type = dc_connection_none;
3274         struct amdgpu_device *adev = drm_to_adev(dev);
3275         union hpd_irq_data hpd_irq_data;
3276         bool link_loss = false;
3277         bool has_left_work = false;
3278         int idx = aconnector->base.index;
3279         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3280
3281         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3282
3283         if (adev->dm.disable_hpd_irq)
3284                 return;
3285
3286         /*
3287          * TODO: Temporarily use a mutex to protect the HPD interrupt from GPIO
3288          * conflicts; once the i2c helper is implemented, this mutex should
3289          * be retired.
3290          */
3291         mutex_lock(&aconnector->hpd_lock);
3292
3293         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3294                                                 &link_loss, true, &has_left_work);
3295
3296         if (!has_left_work)
3297                 goto out;
3298
3299         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3300                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3301                 goto out;
3302         }
3303
3304         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3305                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3306                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3307                         dm_handle_mst_sideband_msg(aconnector);
3308                         goto out;
3309                 }
3310
3311                 if (link_loss) {
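                        /*
                         * Allow only one link-loss recovery in flight:
                         * test-and-set is_handling_link_loss under the
                         * offload lock, and skip scheduling another worker
                         * if one is already handling it.
                         */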
3312                         bool skip = false;
3313
3314                         spin_lock(&offload_wq->offload_lock);
3315                         skip = offload_wq->is_handling_link_loss;
3316
3317                         if (!skip)
3318                                 offload_wq->is_handling_link_loss = true;
3319
3320                         spin_unlock(&offload_wq->offload_lock);
3321
3322                         if (!skip)
3323                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3324
3325                         goto out;
3326                 }
3327         }
3328
3329 out:
3330         if (result && !is_mst_root_connector) {
3331                 /* Downstream Port status changed. */
3332                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3333                         DRM_ERROR("KMS: Failed to detect connector\n");
3334
3335                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3336                         emulated_link_detect(dc_link);
3337
3338                         if (aconnector->fake_enable)
3339                                 aconnector->fake_enable = false;
3340
3341                         amdgpu_dm_update_connector_after_detect(aconnector);
3342
3344                         drm_modeset_lock_all(dev);
3345                         dm_restore_drm_connector_state(dev, connector);
3346                         drm_modeset_unlock_all(dev);
3347
3348                         drm_kms_helper_connector_hotplug_event(connector);
3349                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3350
3351                         if (aconnector->fake_enable)
3352                                 aconnector->fake_enable = false;
3353
3354                         amdgpu_dm_update_connector_after_detect(aconnector);
3355
3357                         drm_modeset_lock_all(dev);
3358                         dm_restore_drm_connector_state(dev, connector);
3359                         drm_modeset_unlock_all(dev);
3360
3361                         drm_kms_helper_connector_hotplug_event(connector);
3362                 }
3363         }
3364 #ifdef CONFIG_DRM_AMD_DC_HDCP
3365         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3366                 if (adev->dm.hdcp_workqueue)
3367                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3368         }
3369 #endif
3370
3371         if (dc_link->type != dc_connection_mst_branch)
3372                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3373
3374         mutex_unlock(&aconnector->hpd_lock);
3375 }
3376
3377 static void register_hpd_handlers(struct amdgpu_device *adev)
3378 {
3379         struct drm_device *dev = adev_to_drm(adev);
3380         struct drm_connector *connector;
3381         struct amdgpu_dm_connector *aconnector;
3382         const struct dc_link *dc_link;
3383         struct dc_interrupt_params int_params = {0};
3384
3385         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3386         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3387
3388         list_for_each_entry(connector,
3389                         &dev->mode_config.connector_list, head) {
3391                 aconnector = to_amdgpu_dm_connector(connector);
3392                 dc_link = aconnector->dc_link;
3393
3394                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3395                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3396                         int_params.irq_source = dc_link->irq_source_hpd;
3397
3398                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3399                                         handle_hpd_irq,
3400                                         (void *) aconnector);
3401                 }
3402
3403                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3405                         /* Also register for DP short pulse (hpd_rx). */
3406                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3407                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3408
3409                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3410                                         handle_hpd_rx_irq,
3411                                         (void *) aconnector);
3412
3413                         if (adev->dm.hpd_rx_offload_wq)
3414                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3415                                         aconnector;
3416                 }
3417         }
3418 }
3419
3420 #if defined(CONFIG_DRM_AMD_DC_SI)
3421 /* Register IRQ sources and initialize IRQ callbacks */
3422 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3423 {
3424         struct dc *dc = adev->dm.dc;
3425         struct common_irq_params *c_irq_params;
3426         struct dc_interrupt_params int_params = {0};
3427         int r;
3428         int i;
3429         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3430
3431         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3432         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3433
3434         /*
3435          * Actions of amdgpu_irq_add_id():
3436          * 1. Register a set() function with base driver.
3437          *    Base driver will call set() function to enable/disable an
3438          *    interrupt in DC hardware.
3439          * 2. Register amdgpu_dm_irq_handler().
3440          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3441          *    coming from DC hardware.
3442          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3443          *    for acknowledging and handling. */
3444
3445         /* Use VBLANK interrupt */
3446         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3447                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3448                 if (r) {
3449                         DRM_ERROR("Failed to add crtc irq id!\n");
3450                         return r;
3451                 }
3452
3453                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3454                 int_params.irq_source =
3455                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3456
3457                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3458
3459                 c_irq_params->adev = adev;
3460                 c_irq_params->irq_src = int_params.irq_source;
3461
3462                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3463                                 dm_crtc_high_irq, c_irq_params);
3464         }
3465
3466         /* Use GRPH_PFLIP interrupt */
3467         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3468                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3469                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3470                 if (r) {
3471                         DRM_ERROR("Failed to add page flip irq id!\n");
3472                         return r;
3473                 }
3474
3475                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3476                 int_params.irq_source =
3477                         dc_interrupt_to_irq_source(dc, i, 0);
3478
3479                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3480
3481                 c_irq_params->adev = adev;
3482                 c_irq_params->irq_src = int_params.irq_source;
3483
3484                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3485                                 dm_pflip_high_irq, c_irq_params);
3487         }
3488
3489         /* HPD */
3490         r = amdgpu_irq_add_id(adev, client_id,
3491                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3492         if (r) {
3493                 DRM_ERROR("Failed to add hpd irq id!\n");
3494                 return r;
3495         }
3496
3497         register_hpd_handlers(adev);
3498
3499         return 0;
3500 }
3501 #endif
3502
3503 /* Register IRQ sources and initialize IRQ callbacks */
3504 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3505 {
3506         struct dc *dc = adev->dm.dc;
3507         struct common_irq_params *c_irq_params;
3508         struct dc_interrupt_params int_params = {0};
3509         int r;
3510         int i;
3511         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3512
3513         if (adev->family >= AMDGPU_FAMILY_AI)
3514                 client_id = SOC15_IH_CLIENTID_DCE;
3515
3516         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3517         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3518
3519         /*
3520          * Actions of amdgpu_irq_add_id():
3521          * 1. Register a set() function with base driver.
3522          *    Base driver will call set() function to enable/disable an
3523          *    interrupt in DC hardware.
3524          * 2. Register amdgpu_dm_irq_handler().
3525          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3526          *    coming from DC hardware.
3527          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3528          *    for acknowledging and handling. */
3529
3530         /* Use VBLANK interrupt */
3531         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3532                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3533                 if (r) {
3534                         DRM_ERROR("Failed to add crtc irq id!\n");
3535                         return r;
3536                 }
3537
3538                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3539                 int_params.irq_source =
3540                         dc_interrupt_to_irq_source(dc, i, 0);
3541
3542                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3543
3544                 c_irq_params->adev = adev;
3545                 c_irq_params->irq_src = int_params.irq_source;
3546
3547                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3548                                 dm_crtc_high_irq, c_irq_params);
3549         }
3550
3551         /* Use VUPDATE interrupt */
3552         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3553                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3554                 if (r) {
3555                         DRM_ERROR("Failed to add vupdate irq id!\n");
3556                         return r;
3557                 }
3558
3559                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3560                 int_params.irq_source =
3561                         dc_interrupt_to_irq_source(dc, i, 0);
3562
3563                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3564
3565                 c_irq_params->adev = adev;
3566                 c_irq_params->irq_src = int_params.irq_source;
3567
3568                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3569                                 dm_vupdate_high_irq, c_irq_params);
3570         }
3571
3572         /* Use GRPH_PFLIP interrupt */
3573         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3574                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3575                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3576                 if (r) {
3577                         DRM_ERROR("Failed to add page flip irq id!\n");
3578                         return r;
3579                 }
3580
3581                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3582                 int_params.irq_source =
3583                         dc_interrupt_to_irq_source(dc, i, 0);
3584
3585                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3586
3587                 c_irq_params->adev = adev;
3588                 c_irq_params->irq_src = int_params.irq_source;
3589
3590                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3591                                 dm_pflip_high_irq, c_irq_params);
3593         }
3594
3595         /* HPD */
3596         r = amdgpu_irq_add_id(adev, client_id,
3597                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3598         if (r) {
3599                 DRM_ERROR("Failed to add hpd irq id!\n");
3600                 return r;
3601         }
3602
3603         register_hpd_handlers(adev);
3604
3605         return 0;
3606 }
3607
3608 /* Register IRQ sources and initialize IRQ callbacks */
3609 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3610 {
3611         struct dc *dc = adev->dm.dc;
3612         struct common_irq_params *c_irq_params;
3613         struct dc_interrupt_params int_params = {0};
3614         int r;
3615         int i;
3616 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3617         static const unsigned int vrtl_int_srcid[] = {
3618                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3619                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3620                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3621                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3622                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3623                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3624         };
3625 #endif
3626
3627         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3628         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3629
3630         /*
3631          * Actions of amdgpu_irq_add_id():
3632          * 1. Register a set() function with base driver.
3633          *    Base driver will call set() function to enable/disable an
3634          *    interrupt in DC hardware.
3635          * 2. Register amdgpu_dm_irq_handler().
3636          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3637          *    coming from DC hardware.
3638          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3639          *    for acknowledging and handling.
3640          */
3641
3642         /* Use VSTARTUP interrupt */
3643         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3644                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3645                         i++) {
3646                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3647
3648                 if (r) {
3649                         DRM_ERROR("Failed to add crtc irq id!\n");
3650                         return r;
3651                 }
3652
3653                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3654                 int_params.irq_source =
3655                         dc_interrupt_to_irq_source(dc, i, 0);
3656
3657                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3658
3659                 c_irq_params->adev = adev;
3660                 c_irq_params->irq_src = int_params.irq_source;
3661
3662                 amdgpu_dm_irq_register_interrupt(
3663                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3664         }
3665
3666         /* Use otg vertical line interrupt */
3667 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3668         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3669                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3670                                 vrtl_int_srcid[i], &adev->vline0_irq);
3671
3672                 if (r) {
3673                         DRM_ERROR("Failed to add vline0 irq id!\n");
3674                         return r;
3675                 }
3676
3677                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3678                 int_params.irq_source =
3679                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3680
3681                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3682                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3683                         break;
3684                 }
3685
3686                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3687                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3688
3689                 c_irq_params->adev = adev;
3690                 c_irq_params->irq_src = int_params.irq_source;
3691
3692                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3693                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3694         }
3695 #endif
3696
3697         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3698          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3699          * to trigger at the end of each vblank, regardless of the state of the lock,
3700          * matching DCE behaviour.
3701          */
3702         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3703              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3704              i++) {
3705                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3706
3707                 if (r) {
3708                         DRM_ERROR("Failed to add vupdate irq id!\n");
3709                         return r;
3710                 }
3711
3712                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3713                 int_params.irq_source =
3714                         dc_interrupt_to_irq_source(dc, i, 0);
3715
3716                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3717
3718                 c_irq_params->adev = adev;
3719                 c_irq_params->irq_src = int_params.irq_source;
3720
3721                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3722                                 dm_vupdate_high_irq, c_irq_params);
3723         }
3724
3725         /* Use GRPH_PFLIP interrupt */
3726         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3727                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3728                         i++) {
3729                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3730                 if (r) {
3731                         DRM_ERROR("Failed to add page flip irq id!\n");
3732                         return r;
3733                 }
3734
3735                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3736                 int_params.irq_source =
3737                         dc_interrupt_to_irq_source(dc, i, 0);
3738
3739                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3740
3741                 c_irq_params->adev = adev;
3742                 c_irq_params->irq_src = int_params.irq_source;
3743
3744                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3745                                 dm_pflip_high_irq, c_irq_params);
3747         }
3748
3749         /* HPD */
3750         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3751                         &adev->hpd_irq);
3752         if (r) {
3753                 DRM_ERROR("Failed to add hpd irq id!\n");
3754                 return r;
3755         }
3756
3757         register_hpd_handlers(adev);
3758
3759         return 0;
3760 }
3761 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3762 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3763 {
3764         struct dc *dc = adev->dm.dc;
3765         struct common_irq_params *c_irq_params;
3766         struct dc_interrupt_params int_params = {0};
3767         int r, i;
3768
3769         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3770         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3771
3772         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3773                         &adev->dmub_outbox_irq);
3774         if (r) {
3775                 DRM_ERROR("Failed to add outbox irq id!\n");
3776                 return r;
3777         }
3778
3779         if (dc->ctx->dmub_srv) {
3780                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3781                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3782                 int_params.irq_source =
3783                 dc_interrupt_to_irq_source(dc, i, 0);
3784
3785                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3786
3787                 c_irq_params->adev = adev;
3788                 c_irq_params->irq_src = int_params.irq_source;
3789
3790                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3791                                 dm_dmub_outbox1_low_irq, c_irq_params);
3792         }
3793
3794         return 0;
3795 }
3796
3797 /*
3798  * Acquires the lock for the atomic state object and returns
3799  * the new atomic state.
3800  *
3801  * This should only be called during atomic check.
3802  */
3803 int dm_atomic_get_state(struct drm_atomic_state *state,
3804                         struct dm_atomic_state **dm_state)
3805 {
3806         struct drm_device *dev = state->dev;
3807         struct amdgpu_device *adev = drm_to_adev(dev);
3808         struct amdgpu_display_manager *dm = &adev->dm;
3809         struct drm_private_state *priv_state;
3810
3811         if (*dm_state)
3812                 return 0;
3813
3814         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3815         if (IS_ERR(priv_state))
3816                 return PTR_ERR(priv_state);
3817
3818         *dm_state = to_dm_atomic_state(priv_state);
3819
3820         return 0;
3821 }
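
/*
 * Usage sketch (illustrative): the dm_state double pointer is filled in at
 * most once per atomic check:
 *
 *     struct dm_atomic_state *dm_state = NULL;
 *     int ret = dm_atomic_get_state(state, &dm_state);
 *     if (ret)
 *             return ret;
 *
 * Repeated calls with the same state object are cheap: once *dm_state is
 * set, the function returns immediately.
 */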
3822
3823 static struct dm_atomic_state *
3824 dm_atomic_get_new_state(struct drm_atomic_state *state)
3825 {
3826         struct drm_device *dev = state->dev;
3827         struct amdgpu_device *adev = drm_to_adev(dev);
3828         struct amdgpu_display_manager *dm = &adev->dm;
3829         struct drm_private_obj *obj;
3830         struct drm_private_state *new_obj_state;
3831         int i;
3832
3833         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3834                 if (obj->funcs == dm->atomic_obj.funcs)
3835                         return to_dm_atomic_state(new_obj_state);
3836         }
3837
3838         return NULL;
3839 }
3840
3841 static struct drm_private_state *
3842 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3843 {
3844         struct dm_atomic_state *old_state, *new_state;
3845
3846         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3847         if (!new_state)
3848                 return NULL;
3849
3850         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3851
3852         old_state = to_dm_atomic_state(obj->state);
3853
3854         if (old_state && old_state->context)
3855                 new_state->context = dc_copy_state(old_state->context);
3856
3857         if (!new_state->context) {
3858                 kfree(new_state);
3859                 return NULL;
3860         }
3861
3862         return &new_state->base;
3863 }
3864
3865 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3866                                     struct drm_private_state *state)
3867 {
3868         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3869
3870         if (dm_state && dm_state->context)
3871                 dc_release_state(dm_state->context);
3872
3873         kfree(dm_state);
3874 }
3875
3876 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3877         .atomic_duplicate_state = dm_atomic_duplicate_state,
3878         .atomic_destroy_state = dm_atomic_destroy_state,
3879 };
3880
3881 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3882 {
3883         struct dm_atomic_state *state;
3884         int r;
3885
3886         adev->mode_info.mode_config_initialized = true;
3887
3888         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3889         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3890
3891         adev_to_drm(adev)->mode_config.max_width = 16384;
3892         adev_to_drm(adev)->mode_config.max_height = 16384;
3893
3894         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3895         /* disable prefer shadow for now due to hibernation issues */
3896         adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3897         /* indicates support for immediate flip */
3898         adev_to_drm(adev)->mode_config.async_page_flip = true;
3899
3900         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3901
3902         state = kzalloc(sizeof(*state), GFP_KERNEL);
3903         if (!state)
3904                 return -ENOMEM;
3905
3906         state->context = dc_create_state(adev->dm.dc);
3907         if (!state->context) {
3908                 kfree(state);
3909                 return -ENOMEM;
3910         }
3911
3912         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3913
3914         drm_atomic_private_obj_init(adev_to_drm(adev),
3915                                     &adev->dm.atomic_obj,
3916                                     &state->base,
3917                                     &dm_atomic_state_funcs);
3918
3919         r = amdgpu_display_modeset_create_props(adev);
3920         if (r) {
3921                 dc_release_state(state->context);
3922                 kfree(state);
3923                 return r;
3924         }
3925
3926         r = amdgpu_dm_audio_init(adev);
3927         if (r) {
3928                 dc_release_state(state->context);
3929                 kfree(state);
3930                 return r;
3931         }
3932
3933         return 0;
3934 }
3935
3936 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3937 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3938 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3939
3940 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
3941         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3942
3943 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3944                                             int bl_idx)
3945 {
3946 #if defined(CONFIG_ACPI)
3947         struct amdgpu_dm_backlight_caps caps;
3948
3949         memset(&caps, 0, sizeof(caps));
3950
3951         if (dm->backlight_caps[bl_idx].caps_valid)
3952                 return;
3953
3954         amdgpu_acpi_get_backlight_caps(&caps);
3955         if (caps.caps_valid) {
3956                 dm->backlight_caps[bl_idx].caps_valid = true;
3957                 if (caps.aux_support)
3958                         return;
3959                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3960                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3961         } else {
3962                 dm->backlight_caps[bl_idx].min_input_signal =
3963                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3964                 dm->backlight_caps[bl_idx].max_input_signal =
3965                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3966         }
3967 #else
3968         if (dm->backlight_caps[bl_idx].aux_support)
3969                 return;
3970
3971         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3972         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3973 #endif
3974 }
3975
3976 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3977                                 unsigned *min, unsigned *max)
3978 {
3979         if (!caps)
3980                 return 0;
3981
3982         if (caps->aux_support) {
3983                 // Firmware limits are in nits, DC API wants millinits.
3984                 *max = 1000 * caps->aux_max_input_signal;
3985                 *min = 1000 * caps->aux_min_input_signal;
3986         } else {
3987                 // Firmware limits are 8-bit, PWM control is 16-bit.
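                // 0x101 * 0xFF = 0xFFFF, so this maps the 8-bit firmware
                // range exactly onto the full 16-bit PWM range.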
3988                 *max = 0x101 * caps->max_input_signal;
3989                 *min = 0x101 * caps->min_input_signal;
3990         }
3991         return 1;
3992 }
3993
3994 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3995                                         uint32_t brightness)
3996 {
3997         unsigned min, max;
3998
3999         if (!get_brightness_range(caps, &min, &max))
4000                 return brightness;
4001
4002         // Rescale 0..255 to min..max
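        // Worked example (assuming the AMDGPU_DM_DEFAULT_* limits above):
        // with min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535,
        // brightness = 128 maps to
        // 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432.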
4003         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4004                                        AMDGPU_MAX_BL_LEVEL);
4005 }
4006
4007 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4008                                       uint32_t brightness)
4009 {
4010         unsigned min, max;
4011
4012         if (!get_brightness_range(caps, &min, &max))
4013                 return brightness;
4014
4015         if (brightness < min)
4016                 return 0;
4017         // Rescale min..max to 0..255
4018         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4019                                  max - min);
4020 }
4021
4022 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4023                                          int bl_idx,
4024                                          u32 user_brightness)
4025 {
4026         struct amdgpu_dm_backlight_caps caps;
4027         struct dc_link *link;
4028         u32 brightness;
4029         bool rc;
4030
4031         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4032         caps = dm->backlight_caps[bl_idx];
4033
4034         dm->brightness[bl_idx] = user_brightness;
4035         /* update scratch register */
4036         if (bl_idx == 0)
4037                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4038         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4039         link = (struct dc_link *)dm->backlight_link[bl_idx];
4040
4041         /* Change brightness based on AUX property */
4042         if (caps.aux_support) {
4043                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4044                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4045                 if (!rc)
4046                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4047         } else {
4048                 rc = dc_link_set_backlight_level(link, brightness, 0);
4049                 if (!rc)
4050                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4051         }
4052
4053         if (rc)
4054                 dm->actual_brightness[bl_idx] = user_brightness;
4055 }
4056
4057 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4058 {
4059         struct amdgpu_display_manager *dm = bl_get_data(bd);
4060         int i;
4061
4062         for (i = 0; i < dm->num_of_edps; i++) {
4063                 if (bd == dm->backlight_dev[i])
4064                         break;
4065         }
4066         if (i >= dm->num_of_edps)
4067                 i = 0;
4068         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4069
4070         return 0;
4071 }
4072
4073 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4074                                          int bl_idx)
4075 {
4076         struct amdgpu_dm_backlight_caps caps;
4077         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4078
4079         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4080         caps = dm->backlight_caps[bl_idx];
4081
4082         if (caps.aux_support) {
4083                 u32 avg, peak;
4084                 bool rc;
4085
4086                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4087                 if (!rc)
4088                         return dm->brightness[bl_idx];
4089                 return convert_brightness_to_user(&caps, avg);
4090         } else {
4091                 int ret = dc_link_get_backlight_level(link);
4092
4093                 if (ret == DC_ERROR_UNEXPECTED)
4094                         return dm->brightness[bl_idx];
4095                 return convert_brightness_to_user(&caps, ret);
4096         }
4097 }
4098
4099 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4100 {
4101         struct amdgpu_display_manager *dm = bl_get_data(bd);
4102         int i;
4103
4104         for (i = 0; i < dm->num_of_edps; i++) {
4105                 if (bd == dm->backlight_dev[i])
4106                         break;
4107         }
4108         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4109                 i = 0;
4110         return amdgpu_dm_backlight_get_level(dm, i);
4111 }
4112
4113 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4114         .options = BL_CORE_SUSPENDRESUME,
4115         .get_brightness = amdgpu_dm_backlight_get_brightness,
4116         .update_status  = amdgpu_dm_backlight_update_status,
4117 };
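
/*
 * These ops back the standard backlight class interface, so a write such
 * as (path shown for illustration; the suffix depends on the DRM minor
 * index and eDP count)
 *
 *   echo 128 > /sys/class/backlight/amdgpu_bl0/brightness
 *
 * ends up in amdgpu_dm_backlight_update_status() above.
 */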
4118
4119 static void
4120 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4121 {
4122         char bl_name[16];
4123         struct backlight_properties props = { 0 };
4124
4125         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4126         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4127
4128         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4129         props.brightness = AMDGPU_MAX_BL_LEVEL;
4130         props.type = BACKLIGHT_RAW;
4131
4132         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4133                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4134
4135         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4136                                                                        adev_to_drm(dm->adev)->dev,
4137                                                                        dm,
4138                                                                        &amdgpu_dm_backlight_ops,
4139                                                                        &props);
4140
4141         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4142                 DRM_ERROR("DM: Backlight registration failed!\n");
4143         else
4144                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4145 }
4146 #endif
4147
4148 static int initialize_plane(struct amdgpu_display_manager *dm,
4149                             struct amdgpu_mode_info *mode_info, int plane_id,
4150                             enum drm_plane_type plane_type,
4151                             const struct dc_plane_cap *plane_cap)
4152 {
4153         struct drm_plane *plane;
4154         unsigned long possible_crtcs;
4155         int ret = 0;
4156
4157         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4158         if (!plane) {
4159                 DRM_ERROR("KMS: Failed to allocate plane\n");
4160                 return -ENOMEM;
4161         }
4162         plane->type = plane_type;
4163
4164         /*
4165          * HACK: IGT tests expect that the primary plane for a CRTC
4166          * can only have one possible CRTC. Only expose support for
4167          * any CRTC if the plane is not going to be used as a primary
4168          * plane for a CRTC - like overlay or underlay planes.
4169          */
4170         possible_crtcs = 1 << plane_id;
4171         if (plane_id >= dm->dc->caps.max_streams)
4172                 possible_crtcs = 0xff;
4173
4174         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4175
4176         if (ret) {
4177                 DRM_ERROR("KMS: Failed to initialize plane\n");
4178                 kfree(plane);
4179                 return ret;
4180         }
4181
4182         if (mode_info)
4183                 mode_info->planes[plane_id] = plane;
4184
4185         return ret;
4186 }
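
/*
 * Example of the possible_crtcs mask above (illustrative): with
 * max_streams == 4, primary plane 2 gets possible_crtcs == 1 << 2 == 0x4
 * and is tied to CRTC 2 only, while an overlay plane with plane_id >= 4
 * gets 0xff and may be placed on any CRTC.
 */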
4187
4189 static void register_backlight_device(struct amdgpu_display_manager *dm,
4190                                       struct dc_link *link)
4191 {
4192 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4193         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4194
4195         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4196             link->type != dc_connection_none) {
4197                 /*
4198                  * Even if registration fails, we should continue with
4199                  * DM initialization because not having a backlight control
4200                  * is better than a black screen.
4201                  */
4202                 if (!dm->backlight_dev[dm->num_of_edps])
4203                         amdgpu_dm_register_backlight_device(dm);
4204
4205                 if (dm->backlight_dev[dm->num_of_edps]) {
4206                         dm->backlight_link[dm->num_of_edps] = link;
4207                         dm->num_of_edps++;
4208                 }
4209         }
4210 #endif
4211 }
4212
4214 /*
4215  * In this architecture, the association
4216  * connector -> encoder -> crtc
4217  * is not really required. The crtc and connector will hold the
4218  * display_index as an abstraction to use with the DAL component.
4219  *
4220  * Returns 0 on success
4221  */
4222 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4223 {
4224         struct amdgpu_display_manager *dm = &adev->dm;
4225         int32_t i;
4226         struct amdgpu_dm_connector *aconnector = NULL;
4227         struct amdgpu_encoder *aencoder = NULL;
4228         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4229         uint32_t link_cnt;
4230         int32_t primary_planes;
4231         enum dc_connection_type new_connection_type = dc_connection_none;
4232         const struct dc_plane_cap *plane;
4233         bool psr_feature_enabled = false;
4234
4235         dm->display_indexes_num = dm->dc->caps.max_streams;
4236         /* Update the actual used number of crtc */
4237         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4238
4239         link_cnt = dm->dc->caps.max_links;
4240         if (amdgpu_dm_mode_config_init(dm->adev)) {
4241                 DRM_ERROR("DM: Failed to initialize mode config\n");
4242                 return -EINVAL;
4243         }
4244
4245         /* There is one primary plane per CRTC */
4246         primary_planes = dm->dc->caps.max_streams;
4247         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4248
4249         /*
4250          * Initialize primary planes, implicit planes for legacy IOCTLs.
4251          * Order is reversed to match iteration order in atomic check.
4252          */
4253         for (i = (primary_planes - 1); i >= 0; i--) {
4254                 plane = &dm->dc->caps.planes[i];
4255
4256                 if (initialize_plane(dm, mode_info, i,
4257                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4258                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4259                         goto fail;
4260                 }
4261         }
4262
4263         /*
4264          * Initialize overlay planes, index starting after primary planes.
4265          * These planes have a higher DRM index than the primary planes since
4266          * they should be considered as having a higher z-order.
4267          * Order is reversed to match iteration order in atomic check.
4268          *
4269          * Only support DCN for now, and only expose one so we don't encourage
4270          * userspace to use up all the pipes.
4271          */
4272         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4273                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4274
4275                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4276                         continue;
4277
4278                 if (!plane->blends_with_above || !plane->blends_with_below)
4279                         continue;
4280
4281                 if (!plane->pixel_format_support.argb8888)
4282                         continue;
4283
4284                 if (initialize_plane(dm, NULL, primary_planes + i,
4285                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4286                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4287                         goto fail;
4288                 }
4289
4290                 /* Only create one overlay plane. */
4291                 break;
4292         }
4293
4294         for (i = 0; i < dm->dc->caps.max_streams; i++)
4295                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4296                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4297                         goto fail;
4298                 }
4299
4300         /* Use Outbox interrupt */
4301         switch (adev->ip_versions[DCE_HWIP][0]) {
4302         case IP_VERSION(3, 0, 0):
4303         case IP_VERSION(3, 1, 2):
4304         case IP_VERSION(3, 1, 3):
4305         case IP_VERSION(3, 1, 5):
4306         case IP_VERSION(3, 1, 6):
4307         case IP_VERSION(2, 1, 0):
4308                 if (register_outbox_irq_handlers(dm->adev)) {
4309                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4310                         goto fail;
4311                 }
4312                 break;
4313         default:
4314                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4315                               adev->ip_versions[DCE_HWIP][0]);
4316         }
4317
4318         /* Determine whether to enable PSR support by default. */
4319         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4320                 switch (adev->ip_versions[DCE_HWIP][0]) {
4321                 case IP_VERSION(3, 1, 2):
4322                 case IP_VERSION(3, 1, 3):
4323                 case IP_VERSION(3, 1, 5):
4324                 case IP_VERSION(3, 1, 6):
4325                         psr_feature_enabled = true;
4326                         break;
4327                 default:
4328                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4329                         break;
4330                 }
4331         }
4332
4333         /* loops over all connectors on the board */
4334         for (i = 0; i < link_cnt; i++) {
4335                 struct dc_link *link = NULL;
4336
4337                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4338                         DRM_ERROR(
4339                                 "KMS: Cannot support more than %d display indexes\n",
4340                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4341                         continue;
4342                 }
4343
4344                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4345                 if (!aconnector)
4346                         goto fail;
4347
4348                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4349                 if (!aencoder)
4350                         goto fail;
4351
4352                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4353                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4354                         goto fail;
4355                 }
4356
4357                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4358                         DRM_ERROR("KMS: Failed to initialize connector\n");
4359                         goto fail;
4360                 }
4361
4362                 link = dc_get_link_at_index(dm->dc, i);
4363
4364                 if (!dc_link_detect_sink(link, &new_connection_type))
4365                         DRM_ERROR("KMS: Failed to detect connector\n");
4366
4367                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4368                         emulated_link_detect(link);
4369                         amdgpu_dm_update_connector_after_detect(aconnector);
4370
4371                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4372                         amdgpu_dm_update_connector_after_detect(aconnector);
4373                         register_backlight_device(dm, link);
4374                         if (dm->num_of_edps)
4375                                 update_connector_ext_caps(aconnector);
4376                         if (psr_feature_enabled)
4377                                 amdgpu_dm_set_psr_caps(link);
4378
4379                         /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4380                          * PSR is also supported.
4381                          */
4382                         if (link->psr_settings.psr_feature_enabled)
4383                                 adev_to_drm(adev)->vblank_disable_immediate = false;
4384                 }
4385
4387         }
4388
4389         /* Software is initialized. Now we can register interrupt handlers. */
4390         switch (adev->asic_type) {
4391 #if defined(CONFIG_DRM_AMD_DC_SI)
4392         case CHIP_TAHITI:
4393         case CHIP_PITCAIRN:
4394         case CHIP_VERDE:
4395         case CHIP_OLAND:
4396                 if (dce60_register_irq_handlers(dm->adev)) {
4397                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4398                         goto fail;
4399                 }
4400                 break;
4401 #endif
4402         case CHIP_BONAIRE:
4403         case CHIP_HAWAII:
4404         case CHIP_KAVERI:
4405         case CHIP_KABINI:
4406         case CHIP_MULLINS:
4407         case CHIP_TONGA:
4408         case CHIP_FIJI:
4409         case CHIP_CARRIZO:
4410         case CHIP_STONEY:
4411         case CHIP_POLARIS11:
4412         case CHIP_POLARIS10:
4413         case CHIP_POLARIS12:
4414         case CHIP_VEGAM:
4415         case CHIP_VEGA10:
4416         case CHIP_VEGA12:
4417         case CHIP_VEGA20:
4418                 if (dce110_register_irq_handlers(dm->adev)) {
4419                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4420                         goto fail;
4421                 }
4422                 break;
4423         default:
4424                 switch (adev->ip_versions[DCE_HWIP][0]) {
4425                 case IP_VERSION(1, 0, 0):
4426                 case IP_VERSION(1, 0, 1):
4427                 case IP_VERSION(2, 0, 2):
4428                 case IP_VERSION(2, 0, 3):
4429                 case IP_VERSION(2, 0, 0):
4430                 case IP_VERSION(2, 1, 0):
4431                 case IP_VERSION(3, 0, 0):
4432                 case IP_VERSION(3, 0, 2):
4433                 case IP_VERSION(3, 0, 3):
4434                 case IP_VERSION(3, 0, 1):
4435                 case IP_VERSION(3, 1, 2):
4436                 case IP_VERSION(3, 1, 3):
4437                 case IP_VERSION(3, 1, 5):
4438                 case IP_VERSION(3, 1, 6):
4439                         if (dcn10_register_irq_handlers(dm->adev)) {
4440                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4441                                 goto fail;
4442                         }
4443                         break;
4444                 default:
4445                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4446                                         adev->ip_versions[DCE_HWIP][0]);
4447                         goto fail;
4448                 }
4449                 break;
4450         }
4451
4452         return 0;
4453 fail:
4454         kfree(aencoder);
4455         kfree(aconnector);
4456
4457         return -EINVAL;
4458 }
4459
4460 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4461 {
4462         drm_atomic_private_obj_fini(&dm->atomic_obj);
4464 }
4465
4466 /******************************************************************************
4467  * amdgpu_display_funcs functions
4468  *****************************************************************************/
4469
4470 /*
4471  * dm_bandwidth_update - program display watermarks
4472  *
4473  * @adev: amdgpu_device pointer
4474  *
4475  * Calculate and program the display watermarks and line buffer allocation.
4476  */
4477 static void dm_bandwidth_update(struct amdgpu_device *adev)
4478 {
4479         /* TODO: implement later */
4480 }
4481
4482 static const struct amdgpu_display_funcs dm_display_funcs = {
4483         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4484         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4485         .backlight_set_level = NULL, /* never called for DC */
4486         .backlight_get_level = NULL, /* never called for DC */
4487         .hpd_sense = NULL, /* called unconditionally */
4488         .hpd_set_polarity = NULL, /* called unconditionally */
4489         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4490         .page_flip_get_scanoutpos =
4491                 dm_crtc_get_scanoutpos, /* called unconditionally */
4492         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4493         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4494 };
4495
4496 #if defined(CONFIG_DEBUG_KERNEL_DC)
4497
4498 static ssize_t s3_debug_store(struct device *device,
4499                               struct device_attribute *attr,
4500                               const char *buf,
4501                               size_t count)
4502 {
4503         int ret;
4504         int s3_state;
4505         struct drm_device *drm_dev = dev_get_drvdata(device);
4506         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4507
4508         ret = kstrtoint(buf, 0, &s3_state);
4509
4510         if (ret == 0) {
4511                 if (s3_state) {
4512                         dm_resume(adev);
4513                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4514                 } else
4515                         dm_suspend(adev);
4516         }
4517
4518         return ret == 0 ? count : 0;
4519 }
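
/*
 * Debug-only S3 hook (CONFIG_DEBUG_KERNEL_DC). Rough usage sketch,
 * assuming the attribute created below on the DRM device:
 *
 *   echo 0 > /sys/.../s3_debug   # calls dm_suspend()
 *   echo 1 > /sys/.../s3_debug   # calls dm_resume() + hotplug event
 */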
4520
4521 DEVICE_ATTR_WO(s3_debug);
4522
4523 #endif
4524
4525 static int dm_early_init(void *handle)
4526 {
4527         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4528
4529         switch (adev->asic_type) {
4530 #if defined(CONFIG_DRM_AMD_DC_SI)
4531         case CHIP_TAHITI:
4532         case CHIP_PITCAIRN:
4533         case CHIP_VERDE:
4534                 adev->mode_info.num_crtc = 6;
4535                 adev->mode_info.num_hpd = 6;
4536                 adev->mode_info.num_dig = 6;
4537                 break;
4538         case CHIP_OLAND:
4539                 adev->mode_info.num_crtc = 2;
4540                 adev->mode_info.num_hpd = 2;
4541                 adev->mode_info.num_dig = 2;
4542                 break;
4543 #endif
4544         case CHIP_BONAIRE:
4545         case CHIP_HAWAII:
4546                 adev->mode_info.num_crtc = 6;
4547                 adev->mode_info.num_hpd = 6;
4548                 adev->mode_info.num_dig = 6;
4549                 break;
4550         case CHIP_KAVERI:
4551                 adev->mode_info.num_crtc = 4;
4552                 adev->mode_info.num_hpd = 6;
4553                 adev->mode_info.num_dig = 7;
4554                 break;
4555         case CHIP_KABINI:
4556         case CHIP_MULLINS:
4557                 adev->mode_info.num_crtc = 2;
4558                 adev->mode_info.num_hpd = 6;
4559                 adev->mode_info.num_dig = 6;
4560                 break;
4561         case CHIP_FIJI:
4562         case CHIP_TONGA:
4563                 adev->mode_info.num_crtc = 6;
4564                 adev->mode_info.num_hpd = 6;
4565                 adev->mode_info.num_dig = 7;
4566                 break;
4567         case CHIP_CARRIZO:
4568                 adev->mode_info.num_crtc = 3;
4569                 adev->mode_info.num_hpd = 6;
4570                 adev->mode_info.num_dig = 9;
4571                 break;
4572         case CHIP_STONEY:
4573                 adev->mode_info.num_crtc = 2;
4574                 adev->mode_info.num_hpd = 6;
4575                 adev->mode_info.num_dig = 9;
4576                 break;
4577         case CHIP_POLARIS11:
4578         case CHIP_POLARIS12:
4579                 adev->mode_info.num_crtc = 5;
4580                 adev->mode_info.num_hpd = 5;
4581                 adev->mode_info.num_dig = 5;
4582                 break;
4583         case CHIP_POLARIS10:
4584         case CHIP_VEGAM:
4585                 adev->mode_info.num_crtc = 6;
4586                 adev->mode_info.num_hpd = 6;
4587                 adev->mode_info.num_dig = 6;
4588                 break;
4589         case CHIP_VEGA10:
4590         case CHIP_VEGA12:
4591         case CHIP_VEGA20:
4592                 adev->mode_info.num_crtc = 6;
4593                 adev->mode_info.num_hpd = 6;
4594                 adev->mode_info.num_dig = 6;
4595                 break;
4596         default:
4598                 switch (adev->ip_versions[DCE_HWIP][0]) {
4599                 case IP_VERSION(2, 0, 2):
4600                 case IP_VERSION(3, 0, 0):
4601                         adev->mode_info.num_crtc = 6;
4602                         adev->mode_info.num_hpd = 6;
4603                         adev->mode_info.num_dig = 6;
4604                         break;
4605                 case IP_VERSION(2, 0, 0):
4606                 case IP_VERSION(3, 0, 2):
4607                         adev->mode_info.num_crtc = 5;
4608                         adev->mode_info.num_hpd = 5;
4609                         adev->mode_info.num_dig = 5;
4610                         break;
4611                 case IP_VERSION(2, 0, 3):
4612                 case IP_VERSION(3, 0, 3):
4613                         adev->mode_info.num_crtc = 2;
4614                         adev->mode_info.num_hpd = 2;
4615                         adev->mode_info.num_dig = 2;
4616                         break;
4617                 case IP_VERSION(1, 0, 0):
4618                 case IP_VERSION(1, 0, 1):
4619                 case IP_VERSION(3, 0, 1):
4620                 case IP_VERSION(2, 1, 0):
4621                 case IP_VERSION(3, 1, 2):
4622                 case IP_VERSION(3, 1, 3):
4623                 case IP_VERSION(3, 1, 5):
4624                 case IP_VERSION(3, 1, 6):
4625                         adev->mode_info.num_crtc = 4;
4626                         adev->mode_info.num_hpd = 4;
4627                         adev->mode_info.num_dig = 4;
4628                         break;
4629                 default:
4630                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4631                                         adev->ip_versions[DCE_HWIP][0]);
4632                         return -EINVAL;
4633                 }
4634                 break;
4635         }
4636
4637         amdgpu_dm_set_irq_funcs(adev);
4638
4639         if (!adev->mode_info.funcs)
4640                 adev->mode_info.funcs = &dm_display_funcs;
4641
4642         /*
4643          * Note: Do NOT change adev->audio_endpt_rreg and
4644          * adev->audio_endpt_wreg because they are initialised in
4645          * amdgpu_device_init()
4646          */
4647 #if defined(CONFIG_DEBUG_KERNEL_DC)
4648         device_create_file(
4649                 adev_to_drm(adev)->dev,
4650                 &dev_attr_s3_debug);
4651 #endif
4652
4653         return 0;
4654 }
4655
4656 static bool modeset_required(struct drm_crtc_state *crtc_state,
4657                              struct dc_stream_state *new_stream,
4658                              struct dc_stream_state *old_stream)
4659 {
4660         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4661 }
4662
4663 static bool modereset_required(struct drm_crtc_state *crtc_state)
4664 {
4665         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4666 }
4667
4668 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4669 {
4670         drm_encoder_cleanup(encoder);
4671         kfree(encoder);
4672 }
4673
4674 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4675         .destroy = amdgpu_dm_encoder_destroy,
4676 };
4677
4679 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4680                                          struct drm_framebuffer *fb,
4681                                          int *min_downscale, int *max_upscale)
4682 {
4683         struct amdgpu_device *adev = drm_to_adev(dev);
4684         struct dc *dc = adev->dm.dc;
4685         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4686         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4687
4688         switch (fb->format->format) {
4689         case DRM_FORMAT_P010:
4690         case DRM_FORMAT_NV12:
4691         case DRM_FORMAT_NV21:
4692                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4693                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4694                 break;
4695
4696         case DRM_FORMAT_XRGB16161616F:
4697         case DRM_FORMAT_ARGB16161616F:
4698         case DRM_FORMAT_XBGR16161616F:
4699         case DRM_FORMAT_ABGR16161616F:
4700                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4701                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4702                 break;
4703
4704         default:
4705                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4706                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4707                 break;
4708         }
4709
4710         /*
4711          * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4712          * scaling factor of 1.0 == 1000 units.
4713          */
4714         if (*max_upscale == 1)
4715                 *max_upscale = 1000;
4716
4717         if (*min_downscale == 1)
4718                 *min_downscale = 1000;
4719 }
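
/*
 * The factors above are in units of 1/1000 of the source size: e.g. a
 * max_upscale of 16000 allows up to 16x upscaling, and a min_downscale of
 * 250 allows shrinking to 1/4 of the source - matching the fallback
 * values used by fill_dc_scaling_info() below.
 */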
4720
4722 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4723                                 const struct drm_plane_state *state,
4724                                 struct dc_scaling_info *scaling_info)
4725 {
4726         int scale_w, scale_h, min_downscale, max_upscale;
4727
4728         memset(scaling_info, 0, sizeof(*scaling_info));
4729
4730         /* Source is fixed-point 16.16, but we ignore the fractional part for now... */
4731         scaling_info->src_rect.x = state->src_x >> 16;
4732         scaling_info->src_rect.y = state->src_y >> 16;
4733
4734         /*
4735          * For reasons we don't (yet) fully understand a non-zero
4736          * src_y coordinate into an NV12 buffer can cause a
4737          * system hang on DCN1x.
4738          * To avoid hangs (and maybe be overly cautious)
4739          * let's reject both non-zero src_x and src_y.
4740          *
4741          * We currently know of only one use-case to reproduce a
4742          * scenario with non-zero src_x and src_y for NV12, which
4743          * is to gesture the YouTube Android app into full screen
4744          * on ChromeOS.
4745          */
4746         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4747             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4748             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4749             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4750                 return -EINVAL;
4751
4752         scaling_info->src_rect.width = state->src_w >> 16;
4753         if (scaling_info->src_rect.width == 0)
4754                 return -EINVAL;
4755
4756         scaling_info->src_rect.height = state->src_h >> 16;
4757         if (scaling_info->src_rect.height == 0)
4758                 return -EINVAL;
4759
4760         scaling_info->dst_rect.x = state->crtc_x;
4761         scaling_info->dst_rect.y = state->crtc_y;
4762
4763         if (state->crtc_w == 0)
4764                 return -EINVAL;
4765
4766         scaling_info->dst_rect.width = state->crtc_w;
4767
4768         if (state->crtc_h == 0)
4769                 return -EINVAL;
4770
4771         scaling_info->dst_rect.height = state->crtc_h;
4772
4773         /* DRM doesn't specify clipping on destination output. */
4774         scaling_info->clip_rect = scaling_info->dst_rect;
4775
4776         /* Validate scaling per-format with DC plane caps */
4777         if (state->plane && state->plane->dev && state->fb) {
4778                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4779                                              &min_downscale, &max_upscale);
4780         } else {
4781                 min_downscale = 250;
4782                 max_upscale = 16000;
4783         }
4784
4785         scale_w = scaling_info->dst_rect.width * 1000 /
4786                   scaling_info->src_rect.width;
4787
4788         if (scale_w < min_downscale || scale_w > max_upscale)
4789                 return -EINVAL;
4790
4791         scale_h = scaling_info->dst_rect.height * 1000 /
4792                   scaling_info->src_rect.height;
4793
4794         if (scale_h < min_downscale || scale_h > max_upscale)
4795                 return -EINVAL;
4796
4797         /*
4798          * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4799          * assume reasonable defaults based on the format.
4800          */
4801
4802         return 0;
4803 }
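
/*
 * Worked example (illustrative only): a 1920x1080 source rect scanned out
 * onto a 960x540 CRTC rect gives scale_w = 960 * 1000 / 1920 = 500, i.e.
 * a 2x downscale, which passes as long as the format's min_downscale is
 * <= 500.
 */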
4804
4805 static void
4806 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4807                                  uint64_t tiling_flags)
4808 {
4809         /* Fill GFX8 params */
4810         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4811                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4812
4813                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4814                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4815                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4816                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4817                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4818
4819                 /* XXX fix me for VI */
4820                 tiling_info->gfx8.num_banks = num_banks;
4821                 tiling_info->gfx8.array_mode =
4822                                 DC_ARRAY_2D_TILED_THIN1;
4823                 tiling_info->gfx8.tile_split = tile_split;
4824                 tiling_info->gfx8.bank_width = bankw;
4825                 tiling_info->gfx8.bank_height = bankh;
4826                 tiling_info->gfx8.tile_aspect = mtaspect;
4827                 tiling_info->gfx8.tile_mode =
4828                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4829         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4830                         == DC_ARRAY_1D_TILED_THIN1) {
4831                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4832         }
4833
4834         tiling_info->gfx8.pipe_config =
4835                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4836 }
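
/*
 * GFX8 and older parts describe tiling via the AMDGPU_TILING_* BO flags
 * decoded above; the GFX9+ paths in this file take DRM format modifiers
 * instead, see fill_gfx9_tiling_info_from_modifier() below.
 */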
4837
4838 static void
4839 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4840                                   union dc_tiling_info *tiling_info)
4841 {
4842         tiling_info->gfx9.num_pipes =
4843                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4844         tiling_info->gfx9.num_banks =
4845                 adev->gfx.config.gb_addr_config_fields.num_banks;
4846         tiling_info->gfx9.pipe_interleave =
4847                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4848         tiling_info->gfx9.num_shader_engines =
4849                 adev->gfx.config.gb_addr_config_fields.num_se;
4850         tiling_info->gfx9.max_compressed_frags =
4851                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4852         tiling_info->gfx9.num_rb_per_se =
4853                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4854         tiling_info->gfx9.shaderEnable = 1;
4855         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4856                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4857 }
4858
4859 static int
4860 validate_dcc(struct amdgpu_device *adev,
4861              const enum surface_pixel_format format,
4862              const enum dc_rotation_angle rotation,
4863              const union dc_tiling_info *tiling_info,
4864              const struct dc_plane_dcc_param *dcc,
4865              const struct dc_plane_address *address,
4866              const struct plane_size *plane_size)
4867 {
4868         struct dc *dc = adev->dm.dc;
4869         struct dc_dcc_surface_param input;
4870         struct dc_surface_dcc_cap output;
4871
4872         memset(&input, 0, sizeof(input));
4873         memset(&output, 0, sizeof(output));
4874
4875         if (!dcc->enable)
4876                 return 0;
4877
4878         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4879             !dc->cap_funcs.get_dcc_compression_cap)
4880                 return -EINVAL;
4881
4882         input.format = format;
4883         input.surface_size.width = plane_size->surface_size.width;
4884         input.surface_size.height = plane_size->surface_size.height;
4885         input.swizzle_mode = tiling_info->gfx9.swizzle;
4886
4887         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4888                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4889         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4890                 input.scan = SCAN_DIRECTION_VERTICAL;
4891
4892         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4893                 return -EINVAL;
4894
4895         if (!output.capable)
4896                 return -EINVAL;
4897
4898         if (dcc->independent_64b_blks == 0 &&
4899             output.grph.rgb.independent_64b_blks != 0)
4900                 return -EINVAL;
4901
4902         return 0;
4903 }
4904
4905 static bool
4906 modifier_has_dcc(uint64_t modifier)
4907 {
4908         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4909 }
4910
4911 static unsigned
4912 modifier_gfx9_swizzle_mode(uint64_t modifier)
4913 {
4914         if (modifier == DRM_FORMAT_MOD_LINEAR)
4915                 return 0;
4916
4917         return AMD_FMT_MOD_GET(TILE, modifier);
4918 }
4919
4920 static const struct drm_format_info *
4921 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4922 {
4923         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4924 }
4925
4926 static void
4927 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4928                                     union dc_tiling_info *tiling_info,
4929                                     uint64_t modifier)
4930 {
4931         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4932         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4933         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4934         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4935
4936         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4937
4938         if (!IS_AMD_FMT_MOD(modifier))
4939                 return;
4940
4941         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4942         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4943
4944         if (adev->family >= AMDGPU_FAMILY_NV) {
4945                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4946         } else {
4947                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4948
4949                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4950         }
4951 }
4952
4953 enum dm_micro_swizzle {
4954         MICRO_SWIZZLE_Z = 0,
4955         MICRO_SWIZZLE_S = 1,
4956         MICRO_SWIZZLE_D = 2,
4957         MICRO_SWIZZLE_R = 3
4958 };
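
/*
 * The micro-tile class lives in the low two bits of the gfx9+ swizzle
 * mode, hence the "& 3" below: e.g. AMD_FMT_MOD_TILE_GFX9_64K_R_X (27)
 * & 3 == 3 == MICRO_SWIZZLE_R, and AMD_FMT_MOD_TILE_GFX9_64K_D (10)
 * & 3 == 2 == MICRO_SWIZZLE_D.
 */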
4959
4960 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4961                                           uint32_t format,
4962                                           uint64_t modifier)
4963 {
4964         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4965         const struct drm_format_info *info = drm_format_info(format);
4966         int i;
4967
4968         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4969
4970         if (!info)
4971                 return false;
4972
4973         /*
4974          * We always have to allow these modifiers:
4975          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4976          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4977          */
4978         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4979             modifier == DRM_FORMAT_MOD_INVALID) {
4980                 return true;
4981         }
4982
4983         /* Check that the modifier is on the list of the plane's supported modifiers. */
4984         for (i = 0; i < plane->modifier_count; i++) {
4985                 if (modifier == plane->modifiers[i])
4986                         break;
4987         }
4988         if (i == plane->modifier_count)
4989                 return false;
4990
4991         /*
4992          * For D swizzle the canonical modifier depends on the bpp, so check
4993          * it here.
4994          */
4995         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4996             adev->family >= AMDGPU_FAMILY_NV) {
4997                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4998                         return false;
4999         }
5000
5001         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
5002             info->cpp[0] < 8)
5003                 return false;
5004
5005         if (modifier_has_dcc(modifier)) {
5006                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
5007                 if (info->cpp[0] != 4)
5008                         return false;
5009                 /* We support multi-planar formats, but not when combined with
5010                  * additional DCC metadata planes. */
5011                 if (info->num_planes > 1)
5012                         return false;
5013         }
5014
5015         return true;
5016 }
5017
5018 static void
5019 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
5020 {
5021         if (!*mods)
5022                 return;
5023
5024         if (*cap - *size < 1) {
5025                 uint64_t new_cap = *cap * 2;
5026                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
5027
5028                 if (!new_mods) {
5029                         kfree(*mods);
5030                         *mods = NULL;
5031                         return;
5032                 }
5033
5034                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
5035                 kfree(*mods);
5036                 *mods = new_mods;
5037                 *cap = new_cap;
5038         }
5039
5040         (*mods)[*size] = mod;
5041         *size += 1;
5042 }
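
/*
 * add_modifier() is a simple doubling vector (behavior sketch):
 * get_plane_modifiers() seeds the capacity at 128 entries, so no
 * reallocation is expected for the current modifier lists; on kmalloc()
 * failure the whole list is dropped and *mods becomes NULL, which
 * callers report as -ENOMEM.
 */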
5043
5044 static void
5045 add_gfx9_modifiers(const struct amdgpu_device *adev,
5046                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
5047 {
5048         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5049         int pipe_xor_bits = min(8, pipes +
5050                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5051         int bank_xor_bits = min(8 - pipe_xor_bits,
5052                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5053         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5054                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5055
5057         if (adev->family == AMDGPU_FAMILY_RV) {
5058                 /* Raven2 and later */
5059                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5060
5061                 /*
5062                  * No _D DCC swizzles yet because we only allow 32bpp, which
5063                  * doesn't support _D on DCN
5064                  */
5065
5066                 if (has_constant_encode) {
5067                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5068                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5069                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5070                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5071                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5072                                     AMD_FMT_MOD_SET(DCC, 1) |
5073                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5074                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5075                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5076                 }
5077
5078                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5079                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5080                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5081                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5082                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5083                             AMD_FMT_MOD_SET(DCC, 1) |
5084                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5085                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5086                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5087
5088                 if (has_constant_encode) {
5089                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5090                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5091                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5092                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5093                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5094                                     AMD_FMT_MOD_SET(DCC, 1) |
5095                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5096                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5097                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5099                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5100                                     AMD_FMT_MOD_SET(RB, rb) |
5101                                     AMD_FMT_MOD_SET(PIPE, pipes));
5102                 }
5103
5104                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5105                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5106                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5107                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5108                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5109                             AMD_FMT_MOD_SET(DCC, 1) |
5110                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5111                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5112                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5113                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5114                             AMD_FMT_MOD_SET(RB, rb) |
5115                             AMD_FMT_MOD_SET(PIPE, pipes));
5116         }
5117
5118         /*
5119          * Only supported for 64bpp on Raven, will be filtered on format in
5120          * dm_plane_format_mod_supported.
5121          */
5122         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5123                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5124                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5125                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5126                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5127
5128         if (adev->family == AMDGPU_FAMILY_RV) {
5129                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5130                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5131                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5132                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5133                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5134         }
5135
5136         /*
5137          * Only supported for 64bpp on Raven, will be filtered on format in
5138          * dm_plane_format_mod_supported.
5139          */
5140         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5141                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5142                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5143
5144         if (adev->family == AMDGPU_FAMILY_RV) {
5145                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5146                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5147                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5148         }
5149 }
5150
5151 static void
5152 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5153                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5154 {
5155         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5156
5157         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5158                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5159                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5160                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5161                     AMD_FMT_MOD_SET(DCC, 1) |
5162                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5163                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5164                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5165
5166         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5167                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5168                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5169                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5170                     AMD_FMT_MOD_SET(DCC, 1) |
5171                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5172                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5173                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5174                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5175
5176         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5177                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5178                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5179                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5180
5181         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5182                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5183                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5184                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5185
5187         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5188         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5189                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5190                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5191
5192         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5193                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5194                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5195 }
5196
5197 static void
5198 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5199                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5200 {
5201         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5202         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5203
5204         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5205                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5206                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5207                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5208                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5209                     AMD_FMT_MOD_SET(DCC, 1) |
5210                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5211                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5212                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5213                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5214
5215         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5216                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5217                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5218                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5219                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5220                     AMD_FMT_MOD_SET(DCC, 1) |
5221                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5222                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5223                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5224
5225         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5226                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5227                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5228                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5229                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5230                     AMD_FMT_MOD_SET(DCC, 1) |
5231                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5232                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5233                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5234                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5235                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5236
5237         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5238                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5239                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5240                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5241                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5242                     AMD_FMT_MOD_SET(DCC, 1) |
5243                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5244                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5245                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5246                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5247
5248         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5249                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5250                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5251                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5252                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5253
5254         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5255                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5256                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5257                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5258                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5259
5260         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5261         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5262                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5263                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5264
5265         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5266                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5267                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5268 }
5269
5270 static int
5271 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5272 {
5273         uint64_t size = 0, capacity = 128;
5274         *mods = NULL;
5275
5276         /* We have not hooked up any pre-GFX9 modifiers. */
5277         if (adev->family < AMDGPU_FAMILY_AI)
5278                 return 0;
5279
5280         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5281
5282         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5283                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5284                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5285                 return *mods ? 0 : -ENOMEM;
5286         }
5287
5288         switch (adev->family) {
5289         case AMDGPU_FAMILY_AI:
5290         case AMDGPU_FAMILY_RV:
5291                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5292                 break;
5293         case AMDGPU_FAMILY_NV:
5294         case AMDGPU_FAMILY_VGH:
5295         case AMDGPU_FAMILY_YC:
5296         case AMDGPU_FAMILY_GC_10_3_6:
5297         case AMDGPU_FAMILY_GC_10_3_7:
5298                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5299                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5300                 else
5301                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5302                 break;
5303         }
5304
5305         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5306
5307         /* INVALID marks the end of the list. */
5308         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5309
5310         if (!*mods)
5311                 return -ENOMEM;
5312
5313         return 0;
5314 }
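
/*
 * On GFX9+ the array built here follows the layout
 * drm_universal_plane_init() expects: valid modifiers terminated by
 * DRM_FORMAT_MOD_INVALID (appended above), with DRM_FORMAT_MOD_LINEAR
 * always advertised.
 */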
5315
5316 static int
5317 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5318                                           const struct amdgpu_framebuffer *afb,
5319                                           const enum surface_pixel_format format,
5320                                           const enum dc_rotation_angle rotation,
5321                                           const struct plane_size *plane_size,
5322                                           union dc_tiling_info *tiling_info,
5323                                           struct dc_plane_dcc_param *dcc,
5324                                           struct dc_plane_address *address,
5325                                           const bool force_disable_dcc)
5326 {
5327         const uint64_t modifier = afb->base.modifier;
5328         int ret = 0;
5329
5330         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5331         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5332
5333         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5334                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5335                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5336                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5337
5338                 dcc->enable = 1;
5339                 dcc->meta_pitch = afb->base.pitches[1];
5340                 dcc->independent_64b_blks = independent_64b_blks;
5341                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5342                         if (independent_64b_blks && independent_128b_blks)
5343                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5344                         else if (independent_128b_blks)
5345                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5346                         else if (independent_64b_blks && !independent_128b_blks)
5347                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5348                         else
5349                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5350                 } else {
5351                         if (independent_64b_blks)
5352                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5353                         else
5354                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5355                 }
5356
5357                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5358                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5359         }
5360
5361         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5362         if (ret)
5363                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5364
5365         return ret;
5366 }
5367
5368 static int
5369 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5370                              const struct amdgpu_framebuffer *afb,
5371                              const enum surface_pixel_format format,
5372                              const enum dc_rotation_angle rotation,
5373                              const uint64_t tiling_flags,
5374                              union dc_tiling_info *tiling_info,
5375                              struct plane_size *plane_size,
5376                              struct dc_plane_dcc_param *dcc,
5377                              struct dc_plane_address *address,
5378                              bool tmz_surface,
5379                              bool force_disable_dcc)
5380 {
5381         const struct drm_framebuffer *fb = &afb->base;
5382         int ret;
5383
5384         memset(tiling_info, 0, sizeof(*tiling_info));
5385         memset(plane_size, 0, sizeof(*plane_size));
5386         memset(dcc, 0, sizeof(*dcc));
5387         memset(address, 0, sizeof(*address));
5388
5389         address->tmz_surface = tmz_surface;
5390
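        /*
         * Formats below VIDEO_BEGIN are single-plane graphics surfaces;
         * video formats are semi-planar, with separate luma and chroma
         * surfaces, pitches and addresses.
         */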
5391         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5392                 uint64_t addr = afb->address + fb->offsets[0];
5393
5394                 plane_size->surface_size.x = 0;
5395                 plane_size->surface_size.y = 0;
5396                 plane_size->surface_size.width = fb->width;
5397                 plane_size->surface_size.height = fb->height;
5398                 plane_size->surface_pitch =
5399                         fb->pitches[0] / fb->format->cpp[0];
5400
5401                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5402                 address->grph.addr.low_part = lower_32_bits(addr);
5403                 address->grph.addr.high_part = upper_32_bits(addr);
5404         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5405                 uint64_t luma_addr = afb->address + fb->offsets[0];
5406                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5407
5408                 plane_size->surface_size.x = 0;
5409                 plane_size->surface_size.y = 0;
5410                 plane_size->surface_size.width = fb->width;
5411                 plane_size->surface_size.height = fb->height;
5412                 plane_size->surface_pitch =
5413                         fb->pitches[0] / fb->format->cpp[0];
5414
5415                 plane_size->chroma_size.x = 0;
5416                 plane_size->chroma_size.y = 0;
5417                 /* TODO: set these based on surface format */
5418                 plane_size->chroma_size.width = fb->width / 2;
5419                 plane_size->chroma_size.height = fb->height / 2;
5420
5421                 plane_size->chroma_pitch =
5422                         fb->pitches[1] / fb->format->cpp[1];
5423
5424                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5425                 address->video_progressive.luma_addr.low_part =
5426                         lower_32_bits(luma_addr);
5427                 address->video_progressive.luma_addr.high_part =
5428                         upper_32_bits(luma_addr);
5429                 address->video_progressive.chroma_addr.low_part =
5430                         lower_32_bits(chroma_addr);
5431                 address->video_progressive.chroma_addr.high_part =
5432                         upper_32_bits(chroma_addr);
5433         }
5434
5435         if (adev->family >= AMDGPU_FAMILY_AI) {
5436                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5437                                                                 rotation, plane_size,
5438                                                                 tiling_info, dcc,
5439                                                                 address,
5440                                                                 force_disable_dcc);
5441                 if (ret)
5442                         return ret;
5443         } else {
5444                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5445         }
5446
5447         return 0;
5448 }
5449
5450 static void
5451 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5452                                bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5453                                bool *global_alpha, int *global_alpha_value)
5454 {
5455         *per_pixel_alpha = false;
5456         *pre_multiplied_alpha = true;
5457         *global_alpha = false;
5458         *global_alpha_value = 0xff;
5459
5460         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5461                 return;
5462
5463         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5464                 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5465                 static const uint32_t alpha_formats[] = {
5466                         DRM_FORMAT_ARGB8888,
5467                         DRM_FORMAT_RGBA8888,
5468                         DRM_FORMAT_ABGR8888,
5469                 };
5470                 uint32_t format = plane_state->fb->format->format;
5471                 unsigned int i;
5472
5473                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5474                         if (format == alpha_formats[i]) {
5475                                 *per_pixel_alpha = true;
5476                                 break;
5477                         }
5478                 }
5479
5480                 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5481                         *pre_multiplied_alpha = false;
5482         }
5483
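        /* DRM plane alpha is 16 bit (0xffff == opaque); DC takes 8 bit. */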
5484         if (plane_state->alpha < 0xffff) {
5485                 *global_alpha = true;
5486                 *global_alpha_value = plane_state->alpha >> 8;
5487         }
5488 }
5489
5490 static int
5491 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5492                             const enum surface_pixel_format format,
5493                             enum dc_color_space *color_space)
5494 {
5495         bool full_range;
5496
5497         *color_space = COLOR_SPACE_SRGB;
5498
5499         /* DRM color properties only affect non-RGB formats. */
5500         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5501                 return 0;
5502
5503         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5504
5505         switch (plane_state->color_encoding) {
5506         case DRM_COLOR_YCBCR_BT601:
5507                 if (full_range)
5508                         *color_space = COLOR_SPACE_YCBCR601;
5509                 else
5510                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5511                 break;
5512
5513         case DRM_COLOR_YCBCR_BT709:
5514                 if (full_range)
5515                         *color_space = COLOR_SPACE_YCBCR709;
5516                 else
5517                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5518                 break;
5519
5520         case DRM_COLOR_YCBCR_BT2020:
5521                 if (full_range)
5522                         *color_space = COLOR_SPACE_2020_YCBCR;
5523                 else
5524                         return -EINVAL;
5525                 break;
5526
5527         default:
5528                 return -EINVAL;
5529         }
5530
5531         return 0;
5532 }
5533
5534 static int
5535 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5536                             const struct drm_plane_state *plane_state,
5537                             const uint64_t tiling_flags,
5538                             struct dc_plane_info *plane_info,
5539                             struct dc_plane_address *address,
5540                             bool tmz_surface,
5541                             bool force_disable_dcc)
5542 {
5543         const struct drm_framebuffer *fb = plane_state->fb;
5544         const struct amdgpu_framebuffer *afb =
5545                 to_amdgpu_framebuffer(plane_state->fb);
5546         int ret;
5547
5548         memset(plane_info, 0, sizeof(*plane_info));
5549
5550         switch (fb->format->format) {
5551         case DRM_FORMAT_C8:
5552                 plane_info->format =
5553                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5554                 break;
5555         case DRM_FORMAT_RGB565:
5556                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5557                 break;
5558         case DRM_FORMAT_XRGB8888:
5559         case DRM_FORMAT_ARGB8888:
5560                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5561                 break;
5562         case DRM_FORMAT_XRGB2101010:
5563         case DRM_FORMAT_ARGB2101010:
5564                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5565                 break;
5566         case DRM_FORMAT_XBGR2101010:
5567         case DRM_FORMAT_ABGR2101010:
5568                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5569                 break;
5570         case DRM_FORMAT_XBGR8888:
5571         case DRM_FORMAT_ABGR8888:
5572                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5573                 break;
5574         case DRM_FORMAT_NV21:
5575                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5576                 break;
5577         case DRM_FORMAT_NV12:
5578                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5579                 break;
5580         case DRM_FORMAT_P010:
5581                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5582                 break;
5583         case DRM_FORMAT_XRGB16161616F:
5584         case DRM_FORMAT_ARGB16161616F:
5585                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5586                 break;
5587         case DRM_FORMAT_XBGR16161616F:
5588         case DRM_FORMAT_ABGR16161616F:
5589                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5590                 break;
5591         case DRM_FORMAT_XRGB16161616:
5592         case DRM_FORMAT_ARGB16161616:
5593                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5594                 break;
5595         case DRM_FORMAT_XBGR16161616:
5596         case DRM_FORMAT_ABGR16161616:
5597                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5598                 break;
5599         default:
5600                 DRM_ERROR(
5601                         "Unsupported screen format %p4cc\n",
5602                         &fb->format->format);
5603                 return -EINVAL;
5604         }
5605
5606         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5607         case DRM_MODE_ROTATE_0:
5608                 plane_info->rotation = ROTATION_ANGLE_0;
5609                 break;
5610         case DRM_MODE_ROTATE_90:
5611                 plane_info->rotation = ROTATION_ANGLE_90;
5612                 break;
5613         case DRM_MODE_ROTATE_180:
5614                 plane_info->rotation = ROTATION_ANGLE_180;
5615                 break;
5616         case DRM_MODE_ROTATE_270:
5617                 plane_info->rotation = ROTATION_ANGLE_270;
5618                 break;
5619         default:
5620                 plane_info->rotation = ROTATION_ANGLE_0;
5621                 break;
5622         }
5623
5624         plane_info->visible = true;
5625         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5626
5627         plane_info->layer_index = 0;
5628
5629         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5630                                           &plane_info->color_space);
5631         if (ret)
5632                 return ret;
5633
5634         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5635                                            plane_info->rotation, tiling_flags,
5636                                            &plane_info->tiling_info,
5637                                            &plane_info->plane_size,
5638                                            &plane_info->dcc, address, tmz_surface,
5639                                            force_disable_dcc);
5640         if (ret)
5641                 return ret;
5642
5643         fill_blending_from_plane_state(
5644                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5645                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5646
5647         return 0;
5648 }
5649
5650 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5651                                     struct dc_plane_state *dc_plane_state,
5652                                     struct drm_plane_state *plane_state,
5653                                     struct drm_crtc_state *crtc_state)
5654 {
5655         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5656         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5657         struct dc_scaling_info scaling_info;
5658         struct dc_plane_info plane_info;
5659         int ret;
5660         bool force_disable_dcc = false;
5661
5662         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5663         if (ret)
5664                 return ret;
5665
5666         dc_plane_state->src_rect = scaling_info.src_rect;
5667         dc_plane_state->dst_rect = scaling_info.dst_rect;
5668         dc_plane_state->clip_rect = scaling_info.clip_rect;
5669         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5670
5671         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5672         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5673                                           afb->tiling_flags,
5674                                           &plane_info,
5675                                           &dc_plane_state->address,
5676                                           afb->tmz_surface,
5677                                           force_disable_dcc);
5678         if (ret)
5679                 return ret;
5680
5681         dc_plane_state->format = plane_info.format;
5682         dc_plane_state->color_space = plane_info.color_space;
5684         dc_plane_state->plane_size = plane_info.plane_size;
5685         dc_plane_state->rotation = plane_info.rotation;
5686         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5687         dc_plane_state->stereo_format = plane_info.stereo_format;
5688         dc_plane_state->tiling_info = plane_info.tiling_info;
5689         dc_plane_state->visible = plane_info.visible;
5690         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5691         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5692         dc_plane_state->global_alpha = plane_info.global_alpha;
5693         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5694         dc_plane_state->dcc = plane_info.dcc;
        dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5696         dc_plane_state->flip_int_enabled = true;
5697
5698         /*
5699          * Always set input transfer function, since plane state is refreshed
5700          * every time.
5701          */
5702         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5703         if (ret)
5704                 return ret;
5705
5706         return 0;
5707 }
5708
5709 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5710                                            const struct dm_connector_state *dm_state,
5711                                            struct dc_stream_state *stream)
5712 {
5713         enum amdgpu_rmx_type rmx_type;
5714
5715         struct rect src = { 0 }; /* viewport in composition space*/
5716         struct rect dst = { 0 }; /* stream addressable area */
5717
5718         /* no mode. nothing to be done */
5719         if (!mode)
5720                 return;
5721
5722         /* Full screen scaling by default */
5723         src.width = mode->hdisplay;
5724         src.height = mode->vdisplay;
5725         dst.width = stream->timing.h_addressable;
5726         dst.height = stream->timing.v_addressable;
5727
5728         if (dm_state) {
5729                 rmx_type = dm_state->scaling;
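                /*
                 * For RMX_ASPECT (and RMX_OFF), fit the source into the
                 * addressable area while preserving its aspect ratio:
                 * compare the two ratios by cross-multiplication and shrink
                 * the destination axis that would otherwise over-stretch.
                 */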
5730                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5731                         if (src.width * dst.height <
5732                                         src.height * dst.width) {
5733                                 /* height needs less upscaling/more downscaling */
5734                                 dst.width = src.width *
5735                                                 dst.height / src.height;
5736                         } else {
5737                                 /* width needs less upscaling/more downscaling */
5738                                 dst.height = src.height *
5739                                                 dst.width / src.width;
5740                         }
5741                 } else if (rmx_type == RMX_CENTER) {
5742                         dst = src;
5743                 }
5744
5745                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5746                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5747
5748                 if (dm_state->underscan_enable) {
5749                         dst.x += dm_state->underscan_hborder / 2;
5750                         dst.y += dm_state->underscan_vborder / 2;
5751                         dst.width -= dm_state->underscan_hborder;
5752                         dst.height -= dm_state->underscan_vborder;
5753                 }
5754         }
5755
5756         stream->src = src;
5757         stream->dst = dst;
5758
5759         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5760                       dst.x, dst.y, dst.width, dst.height);
5762 }
5763
5764 static enum dc_color_depth
5765 convert_color_depth_from_display_info(const struct drm_connector *connector,
5766                                       bool is_y420, int requested_bpc)
5767 {
5768         uint8_t bpc;
5769
5770         if (is_y420) {
5771                 bpc = 8;
5772
5773                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5774                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5775                         bpc = 16;
5776                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5777                         bpc = 12;
5778                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5779                         bpc = 10;
5780         } else {
5781                 bpc = (uint8_t)connector->display_info.bpc;
5782                 /* Assume 8 bpc by default if no bpc is specified. */
5783                 bpc = bpc ? bpc : 8;
5784         }
5785
5786         if (requested_bpc > 0) {
5787                 /*
5788                  * Cap display bpc based on the user requested value.
5789                  *
                 * The value for state->max_bpc may not be correctly updated
5791                  * depending on when the connector gets added to the state
5792                  * or if this was called outside of atomic check, so it
5793                  * can't be used directly.
5794                  */
5795                 bpc = min_t(u8, bpc, requested_bpc);
5796
5797                 /* Round down to the nearest even number. */
5798                 bpc = bpc - (bpc & 1);
5799         }
5800
5801         switch (bpc) {
5802         case 0:
5803                 /*
5804                  * Temporary Work around, DRM doesn't parse color depth for
5805                  * EDID revision before 1.4
5806                  * TODO: Fix edid parsing
5807                  */
5808                 return COLOR_DEPTH_888;
5809         case 6:
5810                 return COLOR_DEPTH_666;
5811         case 8:
5812                 return COLOR_DEPTH_888;
5813         case 10:
5814                 return COLOR_DEPTH_101010;
5815         case 12:
5816                 return COLOR_DEPTH_121212;
5817         case 14:
5818                 return COLOR_DEPTH_141414;
5819         case 16:
5820                 return COLOR_DEPTH_161616;
5821         default:
5822                 return COLOR_DEPTH_UNDEFINED;
5823         }
5824 }
5825
5826 static enum dc_aspect_ratio
5827 get_aspect_ratio(const struct drm_display_mode *mode_in)
5828 {
5829         /* 1-1 mapping, since both enums follow the HDMI spec. */
5830         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5831 }
5832
5833 static enum dc_color_space
5834 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5835 {
5836         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5837
5838         switch (dc_crtc_timing->pixel_encoding) {
5839         case PIXEL_ENCODING_YCBCR422:
5840         case PIXEL_ENCODING_YCBCR444:
5841         case PIXEL_ENCODING_YCBCR420:
5842         {
5843                 /*
                 * Per the HDMI spec, 27.03 MHz (pix_clk_100hz == 270300) is
                 * the separation point between HDTV and SDTV; use YCbCr709
                 * above it and YCbCr601 below it.
5847                  */
5848                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5849                         if (dc_crtc_timing->flags.Y_ONLY)
5850                                 color_space =
5851                                         COLOR_SPACE_YCBCR709_LIMITED;
5852                         else
5853                                 color_space = COLOR_SPACE_YCBCR709;
5854                 } else {
5855                         if (dc_crtc_timing->flags.Y_ONLY)
5856                                 color_space =
5857                                         COLOR_SPACE_YCBCR601_LIMITED;
5858                         else
5859                                 color_space = COLOR_SPACE_YCBCR601;
5860                 }
5861
5862         }
5863         break;
5864         case PIXEL_ENCODING_RGB:
5865                 color_space = COLOR_SPACE_SRGB;
5866                 break;
5867
5868         default:
5869                 WARN_ON(1);
5870                 break;
5871         }
5872
5873         return color_space;
5874 }
5875
5876 static bool adjust_colour_depth_from_display_info(
5877         struct dc_crtc_timing *timing_out,
5878         const struct drm_display_info *info)
5879 {
5880         enum dc_color_depth depth = timing_out->display_color_depth;
5881         int normalized_clk;
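
        /*
         * Normalize to the equivalent 24 bpp (8 bpc) TMDS clock in kHz:
         * deeper colour depths scale it by 30/24, 36/24 or 48/24, and
         * YCbCr 4:2:0 halves it. Walk down from the current depth until
         * the result fits under the sink's max TMDS clock.
         */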
5882         do {
5883                 normalized_clk = timing_out->pix_clk_100hz / 10;
5884                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5885                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5886                         normalized_clk /= 2;
                /* Adjust the pixel clock per the HDMI spec for the given colour depth. */
5888                 switch (depth) {
5889                 case COLOR_DEPTH_888:
5890                         break;
5891                 case COLOR_DEPTH_101010:
5892                         normalized_clk = (normalized_clk * 30) / 24;
5893                         break;
5894                 case COLOR_DEPTH_121212:
5895                         normalized_clk = (normalized_clk * 36) / 24;
5896                         break;
5897                 case COLOR_DEPTH_161616:
5898                         normalized_clk = (normalized_clk * 48) / 24;
5899                         break;
5900                 default:
5901                         /* The above depths are the only ones valid for HDMI. */
5902                         return false;
5903                 }
5904                 if (normalized_clk <= info->max_tmds_clock) {
5905                         timing_out->display_color_depth = depth;
5906                         return true;
5907                 }
5908         } while (--depth > COLOR_DEPTH_666);
5909         return false;
5910 }
5911
5912 static void fill_stream_properties_from_drm_display_mode(
5913         struct dc_stream_state *stream,
5914         const struct drm_display_mode *mode_in,
5915         const struct drm_connector *connector,
5916         const struct drm_connector_state *connector_state,
5917         const struct dc_stream_state *old_stream,
5918         int requested_bpc)
5919 {
5920         struct dc_crtc_timing *timing_out = &stream->timing;
5921         const struct drm_display_info *info = &connector->display_info;
5922         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5923         struct hdmi_vendor_infoframe hv_frame;
5924         struct hdmi_avi_infoframe avi_frame;
5925
5926         memset(&hv_frame, 0, sizeof(hv_frame));
5927         memset(&avi_frame, 0, sizeof(avi_frame));
5928
5929         timing_out->h_border_left = 0;
5930         timing_out->h_border_right = 0;
5931         timing_out->v_border_top = 0;
5932         timing_out->v_border_bottom = 0;
5933         /* TODO: un-hardcode */
5934         if (drm_mode_is_420_only(info, mode_in)
5935                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5936                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5937         else if (drm_mode_is_420_also(info, mode_in)
5938                         && aconnector->force_yuv420_output)
5939                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5940         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5941                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5942                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5943         else
5944                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5945
5946         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5947         timing_out->display_color_depth = convert_color_depth_from_display_info(
5948                 connector,
5949                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5950                 requested_bpc);
5951         timing_out->scan_type = SCANNING_TYPE_NODATA;
5952         timing_out->hdmi_vic = 0;
5953
        if (old_stream) {
5955                 timing_out->vic = old_stream->timing.vic;
5956                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5957                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5958         } else {
5959                 timing_out->vic = drm_match_cea_mode(mode_in);
5960                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5961                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5962                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5963                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5964         }
5965
5966         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5967                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5968                 timing_out->vic = avi_frame.video_code;
5969                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5970                 timing_out->hdmi_vic = hv_frame.vic;
5971         }
5972
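        /*
         * For FreeSync video modes, take the timings from the base mode
         * fields directly; the crtc_* copies may not have been set up for
         * the substituted FreeSync base mode.
         */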
5973         if (is_freesync_video_mode(mode_in, aconnector)) {
5974                 timing_out->h_addressable = mode_in->hdisplay;
5975                 timing_out->h_total = mode_in->htotal;
5976                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5977                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5978                 timing_out->v_total = mode_in->vtotal;
5979                 timing_out->v_addressable = mode_in->vdisplay;
5980                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5981                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5982                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5983         } else {
5984                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5985                 timing_out->h_total = mode_in->crtc_htotal;
5986                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5987                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5988                 timing_out->v_total = mode_in->crtc_vtotal;
5989                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5990                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5991                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5992                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5993         }
5994
5995         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5996
5997         stream->output_color_space = get_output_color_space(timing_out);
5998
5999         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6000         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6001         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6002                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6003                     drm_mode_is_420_also(info, mode_in) &&
6004                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6005                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6006                         adjust_colour_depth_from_display_info(timing_out, info);
6007                 }
6008         }
6009 }
6010
6011 static void fill_audio_info(struct audio_info *audio_info,
6012                             const struct drm_connector *drm_connector,
6013                             const struct dc_sink *dc_sink)
6014 {
6015         int i = 0;
6016         int cea_revision = 0;
6017         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6018
6019         audio_info->manufacture_id = edid_caps->manufacturer_id;
6020         audio_info->product_id = edid_caps->product_id;
6021
6022         cea_revision = drm_connector->display_info.cea_rev;
6023
6024         strscpy(audio_info->display_name,
6025                 edid_caps->display_name,
6026                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6027
6028         if (cea_revision >= 3) {
6029                 audio_info->mode_count = edid_caps->audio_mode_count;
6030
6031                 for (i = 0; i < audio_info->mode_count; ++i) {
6032                         audio_info->modes[i].format_code =
6033                                         (enum audio_format_code)
6034                                         (edid_caps->audio_modes[i].format_code);
6035                         audio_info->modes[i].channel_count =
6036                                         edid_caps->audio_modes[i].channel_count;
6037                         audio_info->modes[i].sample_rates.all =
6038                                         edid_caps->audio_modes[i].sample_rate;
6039                         audio_info->modes[i].sample_size =
6040                                         edid_caps->audio_modes[i].sample_size;
6041                 }
6042         }
6043
6044         audio_info->flags.all = edid_caps->speaker_flags;
6045
6046         /* TODO: We only check for the progressive mode, check for interlace mode too */
6047         if (drm_connector->latency_present[0]) {
6048                 audio_info->video_latency = drm_connector->video_latency[0];
6049                 audio_info->audio_latency = drm_connector->audio_latency[0];
6050         }
6051
6052         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6053
6054 }
6055
6056 static void
6057 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6058                                       struct drm_display_mode *dst_mode)
6059 {
6060         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6061         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6062         dst_mode->crtc_clock = src_mode->crtc_clock;
6063         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6064         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6065         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6066         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6067         dst_mode->crtc_htotal = src_mode->crtc_htotal;
6068         dst_mode->crtc_hskew = src_mode->crtc_hskew;
6069         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6070         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6071         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6072         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6073         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6074 }
6075
6076 static void
6077 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6078                                         const struct drm_display_mode *native_mode,
6079                                         bool scale_enabled)
6080 {
6081         if (scale_enabled) {
6082                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6083         } else if (native_mode->clock == drm_mode->clock &&
6084                         native_mode->htotal == drm_mode->htotal &&
6085                         native_mode->vtotal == drm_mode->vtotal) {
6086                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6087         } else {
6088                 /* no scaling nor amdgpu inserted, no need to patch */
6089         }
6090 }
6091
6092 static struct dc_sink *
6093 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6094 {
6095         struct dc_sink_init_data sink_init_data = { 0 };
6096         struct dc_sink *sink = NULL;

        sink_init_data.link = aconnector->dc_link;
6098         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6099
6100         sink = dc_sink_create(&sink_init_data);
6101         if (!sink) {
6102                 DRM_ERROR("Failed to create sink!\n");
6103                 return NULL;
6104         }
6105         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6106
6107         return sink;
6108 }
6109
6110 static void set_multisync_trigger_params(
6111                 struct dc_stream_state *stream)
6112 {
6113         struct dc_stream_state *master = NULL;
6114
6115         if (stream->triggered_crtc_reset.enabled) {
6116                 master = stream->triggered_crtc_reset.event_source;
6117                 stream->triggered_crtc_reset.event =
6118                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6119                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6120                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6121         }
6122 }
6123
6124 static void set_master_stream(struct dc_stream_state *stream_set[],
6125                               int stream_count)
6126 {
6127         int j, highest_rfr = 0, master_stream = 0;
6128
        for (j = 0; j < stream_count; j++) {
6130                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6131                         int refresh_rate = 0;
6132
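                        /*
                         * Refresh rate in Hz: pixel clock / (htotal * vtotal);
                         * pix_clk_100hz is in units of 100 Hz, hence the * 100.
                         */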
                        refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
                                (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6135                         if (refresh_rate > highest_rfr) {
6136                                 highest_rfr = refresh_rate;
6137                                 master_stream = j;
6138                         }
6139                 }
6140         }
        for (j = 0; j < stream_count; j++) {
6142                 if (stream_set[j])
6143                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6144         }
6145 }
6146
6147 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6148 {
6149         int i = 0;
6150         struct dc_stream_state *stream;
6151
6152         if (context->stream_count < 2)
6153                 return;
        for (i = 0; i < context->stream_count; i++) {
6155                 if (!context->streams[i])
6156                         continue;
6157                 /*
6158                  * TODO: add a function to read AMD VSDB bits and set
6159                  * crtc_sync_master.multi_sync_enabled flag
6160                  * For now it's set to false
6161                  */
6162         }
6163
6164         set_master_stream(context->streams, context->stream_count);
6165
        for (i = 0; i < context->stream_count; i++) {
6167                 stream = context->streams[i];
6168
6169                 if (!stream)
6170                         continue;
6171
6172                 set_multisync_trigger_params(stream);
6173         }
6174 }
6175
6176 #if defined(CONFIG_DRM_AMD_DC_DCN)
6177 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
                            struct dc_sink *sink, struct dc_stream_state *stream,
                            struct dsc_dec_dpcd_caps *dsc_caps)
6180 {
6181         stream->timing.flags.DSC = 0;
6182         dsc_caps->is_dsc_supported = false;
6183
6184         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6185                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6186                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6187                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6188                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6189                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6190                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6191                                 dsc_caps);
6192         }
6193 }
6194
6195 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6196                                     struct dc_sink *sink, struct dc_stream_state *stream,
6197                                     struct dsc_dec_dpcd_caps *dsc_caps,
6198                                     uint32_t max_dsc_target_bpp_limit_override)
6199 {
6200         const struct dc_link_settings *verified_link_cap = NULL;
6201         uint32_t link_bw_in_kbps;
6202         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6203         struct dc *dc = sink->ctx->dc;
6204         struct dc_dsc_bw_range bw_range = {0};
6205         struct dc_dsc_config dsc_cfg = {0};
6206
6207         verified_link_cap = dc_link_get_link_cap(stream->link);
6208         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6209         edp_min_bpp_x16 = 8 * 16;
6210         edp_max_bpp_x16 = 8 * 16;
6211
6212         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6213                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6214
6215         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6216                 edp_min_bpp_x16 = edp_max_bpp_x16;
6217
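        /*
         * If the stream at the least-compressed end of the range (max bpp)
         * already fits within the link bandwidth, pin DSC at
         * edp_max_bpp_x16; otherwise fall through and let DSC pick a
         * config constrained by the link bandwidth.
         */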
6218         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6219                                 dc->debug.dsc_min_slice_height_override,
6220                                 edp_min_bpp_x16, edp_max_bpp_x16,
6221                                 dsc_caps,
6222                                 &stream->timing,
6223                                 &bw_range)) {
6224
6225                 if (bw_range.max_kbps < link_bw_in_kbps) {
6226                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6227                                         dsc_caps,
6228                                         dc->debug.dsc_min_slice_height_override,
6229                                         max_dsc_target_bpp_limit_override,
6230                                         0,
6231                                         &stream->timing,
6232                                         &dsc_cfg)) {
6233                                 stream->timing.dsc_cfg = dsc_cfg;
6234                                 stream->timing.flags.DSC = 1;
6235                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6236                         }
6237                         return;
6238                 }
6239         }
6240
6241         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6242                                 dsc_caps,
6243                                 dc->debug.dsc_min_slice_height_override,
6244                                 max_dsc_target_bpp_limit_override,
6245                                 link_bw_in_kbps,
6246                                 &stream->timing,
6247                                 &dsc_cfg)) {
6248                 stream->timing.dsc_cfg = dsc_cfg;
6249                 stream->timing.flags.DSC = 1;
6250         }
6251 }
6252
6253 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
                                        struct dc_sink *sink, struct dc_stream_state *stream,
                                        struct dsc_dec_dpcd_caps *dsc_caps)
6256 {
6257         struct drm_connector *drm_connector = &aconnector->base;
6258         uint32_t link_bandwidth_kbps;
6259         uint32_t max_dsc_target_bpp_limit_override = 0;
6260         struct dc *dc = sink->ctx->dc;
6261         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6262         uint32_t dsc_max_supported_bw_in_kbps;
6263
6264         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6265                                                         dc_link_get_link_cap(aconnector->dc_link));
6266
6267         if (stream->link && stream->link->local_sink)
6268                 max_dsc_target_bpp_limit_override =
6269                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6270
6271         /* Set DSC policy according to dsc_clock_en */
6272         dc_dsc_policy_set_enable_dsc_when_not_needed(
6273                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6274
6275         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6276             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6277
6278                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6279
6280         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6281                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6282                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6283                                                 dsc_caps,
6284                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6285                                                 max_dsc_target_bpp_limit_override,
6286                                                 link_bandwidth_kbps,
6287                                                 &stream->timing,
6288                                                 &stream->timing.dsc_cfg)) {
6289                                 stream->timing.flags.DSC = 1;
6290                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6291                                                                  __func__, drm_connector->name);
6292                         }
6293                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6294                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6295                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6296                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6297
6298                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6299                                         max_supported_bw_in_kbps > 0 &&
6300                                         dsc_max_supported_bw_in_kbps > 0)
6301                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6302                                                 dsc_caps,
6303                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6304                                                 max_dsc_target_bpp_limit_override,
6305                                                 dsc_max_supported_bw_in_kbps,
6306                                                 &stream->timing,
6307                                                 &stream->timing.dsc_cfg)) {
6308                                         stream->timing.flags.DSC = 1;
6309                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6310                                                                          __func__, drm_connector->name);
6311                                 }
6312                 }
6313         }
6314
6315         /* Overwrite the stream flag if DSC is enabled through debugfs */
6316         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6317                 stream->timing.flags.DSC = 1;
6318
6319         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6320                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6321
6322         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6323                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6324
6325         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6326                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6327 }
6328 #endif /* CONFIG_DRM_AMD_DC_DCN */
6329
6330 /**
6331  * DOC: FreeSync Video
6332  *
6333  * When a userspace application wants to play a video, the content follows a
6334  * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
6337  *
6338  * - TV/NTSC (23.976 FPS)
6339  * - Cinema (24 FPS)
6340  * - TV/PAL (25 FPS)
6341  * - TV/NTSC (29.97 FPS)
6342  * - TV/NTSC (30 FPS)
6343  * - Cinema HFR (48 FPS)
6344  * - TV/PAL (50 FPS)
6345  * - Commonly used (60 FPS)
6346  * - Multiples of 24 (48,72,96,120 FPS)
6347  *
 * The list of standard video formats is not huge, so these modes can be
 * added to the connector's mode list beforehand. With that, userspace can
 * leverage FreeSync to extend the front porch in order to attain the target
 * refresh rate. Such a switch happens seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with a FreeSync mode that differs only in
 * the refresh rate, DC skips the full update and avoids any blink during
 * the transition. For example, a video player can switch from 60Hz to 30Hz
 * for playing TV/NTSC content when it goes full screen, without causing any
 * display blink. The same concept applies to any mode setting change.
6359  */
6360 static struct drm_display_mode *
6361 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6362                           bool use_probed_modes)
6363 {
6364         struct drm_display_mode *m, *m_pref = NULL;
6365         u16 current_refresh, highest_refresh;
6366         struct list_head *list_head = use_probed_modes ?
6367                                                     &aconnector->base.probed_modes :
6368                                                     &aconnector->base.modes;
6369
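        /* Reuse the cached base mode if we have already computed it. */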
6370         if (aconnector->freesync_vid_base.clock != 0)
6371                 return &aconnector->freesync_vid_base;
6372
6373         /* Find the preferred mode */
        list_for_each_entry(m, list_head, head) {
6375                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6376                         m_pref = m;
6377                         break;
6378                 }
6379         }
6380
6381         if (!m_pref) {
                /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6383                 m_pref = list_first_entry_or_null(
6384                         &aconnector->base.modes, struct drm_display_mode, head);
6385                 if (!m_pref) {
6386                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6387                         return NULL;
6388                 }
6389         }
6390
6391         highest_refresh = drm_mode_vrefresh(m_pref);
6392
6393         /*
6394          * Find the mode with highest refresh rate with same resolution.
6395          * For some monitors, preferred mode is not the mode with highest
6396          * supported refresh rate.
6397          */
        list_for_each_entry(m, list_head, head) {
6399                 current_refresh  = drm_mode_vrefresh(m);
6400
6401                 if (m->hdisplay == m_pref->hdisplay &&
6402                     m->vdisplay == m_pref->vdisplay &&
6403                     highest_refresh < current_refresh) {
6404                         highest_refresh = current_refresh;
6405                         m_pref = m;
6406                 }
6407         }
6408
6409         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6410         return m_pref;
6411 }
6412
6413 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6414                                    struct amdgpu_dm_connector *aconnector)
6415 {
6416         struct drm_display_mode *high_mode;
6417         int timing_diff;
6418
6419         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6420         if (!high_mode || !mode)
6421                 return false;
6422
6423         timing_diff = high_mode->vtotal - mode->vtotal;
6424
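        /*
         * A FreeSync video mode must match the base mode in everything
         * except the vertical front porch: any extra vtotal lines must come
         * from shifting vsync, so vsync_start/end must differ by exactly
         * timing_diff.
         */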
6425         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6426             high_mode->hdisplay != mode->hdisplay ||
6427             high_mode->vdisplay != mode->vdisplay ||
6428             high_mode->hsync_start != mode->hsync_start ||
6429             high_mode->hsync_end != mode->hsync_end ||
6430             high_mode->htotal != mode->htotal ||
6431             high_mode->hskew != mode->hskew ||
6432             high_mode->vscan != mode->vscan ||
6433             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6434             high_mode->vsync_end - mode->vsync_end != timing_diff)
6435                 return false;
6436         else
6437                 return true;
6438 }
6439
6440 static struct dc_stream_state *
6441 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6442                        const struct drm_display_mode *drm_mode,
6443                        const struct dm_connector_state *dm_state,
6444                        const struct dc_stream_state *old_stream,
6445                        int requested_bpc)
6446 {
6447         struct drm_display_mode *preferred_mode = NULL;
6448         struct drm_connector *drm_connector;
6449         const struct drm_connector_state *con_state =
6450                 dm_state ? &dm_state->base : NULL;
6451         struct dc_stream_state *stream = NULL;
6452         struct drm_display_mode mode = *drm_mode;
6453         struct drm_display_mode saved_mode;
6454         struct drm_display_mode *freesync_mode = NULL;
6455         bool native_mode_found = false;
6456         bool recalculate_timing = false;
6457         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6458         int mode_refresh;
6459         int preferred_refresh = 0;
6460 #if defined(CONFIG_DRM_AMD_DC_DCN)
6461         struct dsc_dec_dpcd_caps dsc_caps;
6462 #endif
6463         struct dc_sink *sink = NULL;
6464
6465         memset(&saved_mode, 0, sizeof(saved_mode));
6466
6467         if (aconnector == NULL) {
6468                 DRM_ERROR("aconnector is NULL!\n");
6469                 return stream;
6470         }
6471
6472         drm_connector = &aconnector->base;
6473
6474         if (!aconnector->dc_sink) {
6475                 sink = create_fake_sink(aconnector);
6476                 if (!sink)
6477                         return stream;
6478         } else {
6479                 sink = aconnector->dc_sink;
6480                 dc_sink_retain(sink);
6481         }
6482
6483         stream = dc_create_stream_for_sink(sink);
6484
6485         if (stream == NULL) {
6486                 DRM_ERROR("Failed to create stream for sink!\n");
6487                 goto finish;
6488         }
6489
6490         stream->dm_stream_context = aconnector;
6491
6492         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6493                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6494
6495         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6496                 /* Search for preferred mode */
6497                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6498                         native_mode_found = true;
6499                         break;
6500                 }
6501         }
6502         if (!native_mode_found)
6503                 preferred_mode = list_first_entry_or_null(
6504                                 &aconnector->base.modes,
6505                                 struct drm_display_mode,
6506                                 head);
6507
6508         mode_refresh = drm_mode_vrefresh(&mode);
6509
6510         if (preferred_mode == NULL) {
                /*
                 * This may not be an error: the use case is a hotplug with no
                 * usermode call to reset and set the mode. In that case we set
                 * the mode ourselves to restore the previous mode, and the
                 * mode list may not have been filled in yet.
                 */
6517                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6518         } else {
6519                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6520                 if (recalculate_timing) {
6521                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6522                         drm_mode_copy(&saved_mode, &mode);
6523                         drm_mode_copy(&mode, freesync_mode);
6524                 } else {
6525                         decide_crtc_timing_for_drm_display_mode(
6526                                 &mode, preferred_mode, scale);
6527
6528                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6529                 }
6530         }
6531
6532         if (recalculate_timing)
6533                 drm_mode_set_crtcinfo(&saved_mode, 0);
6534         else if (!dm_state)
6535                 drm_mode_set_crtcinfo(&mode, 0);
6536
        /*
         * If scaling is enabled and the refresh rate didn't change,
         * we copy the VIC and polarities from the old timings.
         */
6541         if (!scale || mode_refresh != preferred_refresh)
6542                 fill_stream_properties_from_drm_display_mode(
6543                         stream, &mode, &aconnector->base, con_state, NULL,
6544                         requested_bpc);
6545         else
6546                 fill_stream_properties_from_drm_display_mode(
6547                         stream, &mode, &aconnector->base, con_state, old_stream,
6548                         requested_bpc);
6549
6550 #if defined(CONFIG_DRM_AMD_DC_DCN)
6551         /* SST DSC determination policy */
6552         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6553         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6554                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6555 #endif
6556
6557         update_stream_scaling_settings(&mode, dm_state, stream);
6558
6559         fill_audio_info(
6560                 &stream->audio_info,
6561                 drm_connector,
6562                 sink);
6563
6564         update_stream_signal(stream, sink);
6565
6566         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6567                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6568
6569         if (stream->link->psr_settings.psr_feature_enabled) {
6570                 /*
6571                  * Decide whether the stream supports VSC SDP colorimetry
6572                  * before building the VSC info packet.
6573                  */
6574                 stream->use_vsc_sdp_for_colorimetry = false;
6575                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6576                         stream->use_vsc_sdp_for_colorimetry =
6577                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6578                 } else {
6579                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6580                                 stream->use_vsc_sdp_for_colorimetry = true;
6581                 }
6582                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6583                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6584
6585         }
6586 finish:
6587         dc_sink_release(sink);
6588
6589         return stream;
6590 }
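
/*
 * Illustrative summary (not driver code) of the timing decision in
 * create_stream_for_sink() above:
 *
 *   is_freesync_video_mode(&mode, ...)          -> adopt the highest-refresh
 *                                                  base mode; the requested
 *                                                  mode is kept in saved_mode
 *   scale && mode_refresh == preferred_refresh  -> reuse the VIC and
 *                                                  polarities of old_stream
 *   otherwise                                   -> derive fresh CRTC timing
 *                                                  from the preferred mode
 */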
6591
6592 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6593 {
6594         drm_crtc_cleanup(crtc);
6595         kfree(crtc);
6596 }
6597
6598 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6599                                   struct drm_crtc_state *state)
6600 {
6601         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6602
6603         /* TODO: Destroy dc_stream objects once the stream object is flattened */
6604         if (cur->stream)
6605                 dc_stream_release(cur->stream);
6606
6608         __drm_atomic_helper_crtc_destroy_state(state);
6609
6611         kfree(state);
6612 }
6613
6614 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6615 {
6616         struct dm_crtc_state *state;
6617
6618         if (crtc->state)
6619                 dm_crtc_destroy_state(crtc, crtc->state);
6620
6621         state = kzalloc(sizeof(*state), GFP_KERNEL);
6622         if (WARN_ON(!state))
6623                 return;
6624
6625         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6626 }
6627
6628 static struct drm_crtc_state *
6629 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6630 {
6631         struct dm_crtc_state *state, *cur;
6632
6633         if (WARN_ON(!crtc->state))
6634                 return NULL;
6635
6636         cur = to_dm_crtc_state(crtc->state);
6637
6638         state = kzalloc(sizeof(*state), GFP_KERNEL);
6639         if (!state)
6640                 return NULL;
6641
6642         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6643
6644         if (cur->stream) {
6645                 state->stream = cur->stream;
6646                 dc_stream_retain(state->stream);
6647         }
6648
6649         state->active_planes = cur->active_planes;
6650         state->vrr_infopacket = cur->vrr_infopacket;
6651         state->abm_level = cur->abm_level;
6652         state->vrr_supported = cur->vrr_supported;
6653         state->freesync_config = cur->freesync_config;
6654         state->cm_has_degamma = cur->cm_has_degamma;
6655         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6656         state->force_dpms_off = cur->force_dpms_off;
6657         /* TODO: Duplicate dc_stream once the stream object is flattened */
6658
6659         return &state->base;
6660 }
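
/*
 * A minimal sketch (illustrative only, simplified flow) of the retain/release
 * pairing the two helpers above rely on:
 *
 *	new_state = dm_crtc_duplicate_state(crtc);  // dc_stream_retain()
 *	...                    // both old and new state reference the stream
 *	dm_crtc_destroy_state(crtc, old_state);     // dc_stream_release()
 *
 * Every duplicate that retains the dc_stream must be balanced by exactly one
 * release in the destroy path, otherwise the stream leaks.
 */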
6661
6662 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6663 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6664 {
6665         crtc_debugfs_init(crtc);
6666
6667         return 0;
6668 }
6669 #endif
6670
6671 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6672 {
6673         enum dc_irq_source irq_source;
6674         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6675         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6676         int rc;
6677
6678         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6679
6680         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6681
6682         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6683                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6684         return rc;
6685 }
6686
6687 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6688 {
6689         enum dc_irq_source irq_source;
6690         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6691         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6692         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6693         struct amdgpu_display_manager *dm = &adev->dm;
6694         struct vblank_control_work *work;
6695         int rc = 0;
6696
6697         if (enable) {
6698                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6699                 if (amdgpu_dm_vrr_active(acrtc_state))
6700                         rc = dm_set_vupdate_irq(crtc, true);
6701         } else {
6702                 /* vblank irq off -> vupdate irq off */
6703                 rc = dm_set_vupdate_irq(crtc, false);
6704         }
6705
6706         if (rc)
6707                 return rc;
6708
6709         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6710
6711         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6712                 return -EBUSY;
6713
6714         if (amdgpu_in_reset(adev))
6715                 return 0;
6716
6717         if (dm->vblank_control_workqueue) {
6718                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6719                 if (!work)
6720                         return -ENOMEM;
6721
6722                 INIT_WORK(&work->work, vblank_control_worker);
6723                 work->dm = dm;
6724                 work->acrtc = acrtc;
6725                 work->enable = enable;
6726
6727                 if (acrtc_state->stream) {
6728                         dc_stream_retain(acrtc_state->stream);
6729                         work->stream = acrtc_state->stream;
6730                 }
6731
6732                 queue_work(dm->vblank_control_workqueue, &work->work);
6733         }
6734
6735         return 0;
6736 }
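
/*
 * dm_set_vblank() may run with interrupts disabled, so the PSR/vblank control
 * above follows the usual "atomic context -> worker" pattern. A minimal
 * sketch of that pattern (illustrative, error handling elided):
 *
 *	work = kzalloc(sizeof(*work), GFP_ATOMIC);  // no sleeping allocation
 *	INIT_WORK(&work->work, vblank_control_worker);
 *	dc_stream_retain(stream);     // keep the stream alive for the worker
 *	work->stream = stream;
 *	queue_work(dm->vblank_control_workqueue, &work->work);
 *
 * The worker is then responsible for the matching dc_stream_release() and
 * for freeing the work item.
 */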
6737
6738 static int dm_enable_vblank(struct drm_crtc *crtc)
6739 {
6740         return dm_set_vblank(crtc, true);
6741 }
6742
6743 static void dm_disable_vblank(struct drm_crtc *crtc)
6744 {
6745         dm_set_vblank(crtc, false);
6746 }
6747
6748 /* Implemented only the options currently available for the driver */
6749 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6750         .reset = dm_crtc_reset_state,
6751         .destroy = amdgpu_dm_crtc_destroy,
6752         .set_config = drm_atomic_helper_set_config,
6753         .page_flip = drm_atomic_helper_page_flip,
6754         .atomic_duplicate_state = dm_crtc_duplicate_state,
6755         .atomic_destroy_state = dm_crtc_destroy_state,
6756         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6757         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6758         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6759         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6760         .enable_vblank = dm_enable_vblank,
6761         .disable_vblank = dm_disable_vblank,
6762         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6763 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6764         .late_register = amdgpu_dm_crtc_late_register,
6765 #endif
6766 };
6767
6768 static enum drm_connector_status
6769 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6770 {
6771         bool connected;
6772         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6773
6774         /*
6775          * Notes:
6776          * 1. This interface is NOT called in context of HPD irq.
6777          * 2. This interface *is called* in the context of a user-mode ioctl,
6778          * which makes it a bad place for *any* MST-related activity.
6779          */
6780
6781         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6782             !aconnector->fake_enable)
6783                 connected = (aconnector->dc_sink != NULL);
6784         else
6785                 connected = (aconnector->base.force == DRM_FORCE_ON);
6786
6787         update_subconnector_property(aconnector);
6788
6789         return (connected ? connector_status_connected :
6790                         connector_status_disconnected);
6791 }
6792
6793 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6794                                             struct drm_connector_state *connector_state,
6795                                             struct drm_property *property,
6796                                             uint64_t val)
6797 {
6798         struct drm_device *dev = connector->dev;
6799         struct amdgpu_device *adev = drm_to_adev(dev);
6800         struct dm_connector_state *dm_old_state =
6801                 to_dm_connector_state(connector->state);
6802         struct dm_connector_state *dm_new_state =
6803                 to_dm_connector_state(connector_state);
6804
6805         int ret = -EINVAL;
6806
6807         if (property == dev->mode_config.scaling_mode_property) {
6808                 enum amdgpu_rmx_type rmx_type;
6809
6810                 switch (val) {
6811                 case DRM_MODE_SCALE_CENTER:
6812                         rmx_type = RMX_CENTER;
6813                         break;
6814                 case DRM_MODE_SCALE_ASPECT:
6815                         rmx_type = RMX_ASPECT;
6816                         break;
6817                 case DRM_MODE_SCALE_FULLSCREEN:
6818                         rmx_type = RMX_FULL;
6819                         break;
6820                 case DRM_MODE_SCALE_NONE:
6821                 default:
6822                         rmx_type = RMX_OFF;
6823                         break;
6824                 }
6825
6826                 if (dm_old_state->scaling == rmx_type)
6827                         return 0;
6828
6829                 dm_new_state->scaling = rmx_type;
6830                 ret = 0;
6831         } else if (property == adev->mode_info.underscan_hborder_property) {
6832                 dm_new_state->underscan_hborder = val;
6833                 ret = 0;
6834         } else if (property == adev->mode_info.underscan_vborder_property) {
6835                 dm_new_state->underscan_vborder = val;
6836                 ret = 0;
6837         } else if (property == adev->mode_info.underscan_property) {
6838                 dm_new_state->underscan_enable = val;
6839                 ret = 0;
6840         } else if (property == adev->mode_info.abm_level_property) {
6841                 dm_new_state->abm_level = val;
6842                 ret = 0;
6843         }
6844
6845         return ret;
6846 }
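
/*
 * For reference, a hedged userspace-side sketch of driving the properties
 * handled above through libdrm; "fd", "conn_id" and "prop_id" are assumptions
 * of the example, with prop_id being the id of the connector's standard
 * "scaling mode" property:
 *
 *	drmModeObjectSetProperty(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR,
 *				 prop_id, DRM_MODE_SCALE_ASPECT);
 *
 * The atomic path then lands here with val == DRM_MODE_SCALE_ASPECT, which is
 * translated to RMX_ASPECT in dm_new_state->scaling.
 */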
6847
6848 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6849                                             const struct drm_connector_state *state,
6850                                             struct drm_property *property,
6851                                             uint64_t *val)
6852 {
6853         struct drm_device *dev = connector->dev;
6854         struct amdgpu_device *adev = drm_to_adev(dev);
6855         struct dm_connector_state *dm_state =
6856                 to_dm_connector_state(state);
6857         int ret = -EINVAL;
6858
6859         if (property == dev->mode_config.scaling_mode_property) {
6860                 switch (dm_state->scaling) {
6861                 case RMX_CENTER:
6862                         *val = DRM_MODE_SCALE_CENTER;
6863                         break;
6864                 case RMX_ASPECT:
6865                         *val = DRM_MODE_SCALE_ASPECT;
6866                         break;
6867                 case RMX_FULL:
6868                         *val = DRM_MODE_SCALE_FULLSCREEN;
6869                         break;
6870                 case RMX_OFF:
6871                 default:
6872                         *val = DRM_MODE_SCALE_NONE;
6873                         break;
6874                 }
6875                 ret = 0;
6876         } else if (property == adev->mode_info.underscan_hborder_property) {
6877                 *val = dm_state->underscan_hborder;
6878                 ret = 0;
6879         } else if (property == adev->mode_info.underscan_vborder_property) {
6880                 *val = dm_state->underscan_vborder;
6881                 ret = 0;
6882         } else if (property == adev->mode_info.underscan_property) {
6883                 *val = dm_state->underscan_enable;
6884                 ret = 0;
6885         } else if (property == adev->mode_info.abm_level_property) {
6886                 *val = dm_state->abm_level;
6887                 ret = 0;
6888         }
6889
6890         return ret;
6891 }
6892
6893 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6894 {
6895         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6896
6897         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6898 }
6899
6900 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6901 {
6902         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6903         const struct dc_link *link = aconnector->dc_link;
6904         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6905         struct amdgpu_display_manager *dm = &adev->dm;
6906         int i;
6907
6908         /*
6909          * Call this only if mst_mgr was initialized earlier, since it's not
6910          * done for all connector types.
6911          */
6912         if (aconnector->mst_mgr.dev)
6913                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6914
6915 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
6916         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6917         for (i = 0; i < dm->num_of_edps; i++) {
6918                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6919                         backlight_device_unregister(dm->backlight_dev[i]);
6920                         dm->backlight_dev[i] = NULL;
6921                 }
6922         }
6923 #endif
6924
6925         if (aconnector->dc_em_sink)
6926                 dc_sink_release(aconnector->dc_em_sink);
6927         aconnector->dc_em_sink = NULL;
6928         if (aconnector->dc_sink)
6929                 dc_sink_release(aconnector->dc_sink);
6930         aconnector->dc_sink = NULL;
6931
6932         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6933         drm_connector_unregister(connector);
6934         drm_connector_cleanup(connector);
6935         if (aconnector->i2c) {
6936                 i2c_del_adapter(&aconnector->i2c->base);
6937                 kfree(aconnector->i2c);
6938         }
6939         kfree(aconnector->dm_dp_aux.aux.name);
6940
6941         kfree(connector);
6942 }
6943
6944 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6945 {
6946         struct dm_connector_state *state =
6947                 to_dm_connector_state(connector->state);
6948
6949         if (connector->state)
6950                 __drm_atomic_helper_connector_destroy_state(connector->state);
6951
6952         kfree(state);
6953
6954         state = kzalloc(sizeof(*state), GFP_KERNEL);
6955
6956         if (state) {
6957                 state->scaling = RMX_OFF;
6958                 state->underscan_enable = false;
6959                 state->underscan_hborder = 0;
6960                 state->underscan_vborder = 0;
6961                 state->base.max_requested_bpc = 8;
6962                 state->vcpi_slots = 0;
6963                 state->pbn = 0;
6964                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6965                         state->abm_level = amdgpu_dm_abm_level;
6966
6967                 __drm_atomic_helper_connector_reset(connector, &state->base);
6968         }
6969 }
6970
6971 struct drm_connector_state *
6972 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6973 {
6974         struct dm_connector_state *state =
6975                 to_dm_connector_state(connector->state);
6976
6977         struct dm_connector_state *new_state =
6978                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6979
6980         if (!new_state)
6981                 return NULL;
6982
6983         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6984
6985         new_state->freesync_capable = state->freesync_capable;
6986         new_state->abm_level = state->abm_level;
6987         new_state->scaling = state->scaling;
6988         new_state->underscan_enable = state->underscan_enable;
6989         new_state->underscan_hborder = state->underscan_hborder;
6990         new_state->underscan_vborder = state->underscan_vborder;
6991         new_state->vcpi_slots = state->vcpi_slots;
6992         new_state->pbn = state->pbn;
6993         return &new_state->base;
6994 }
6995
6996 static int
6997 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6998 {
6999         struct amdgpu_dm_connector *amdgpu_dm_connector =
7000                 to_amdgpu_dm_connector(connector);
7001         int r;
7002
7003         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7004             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7005                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7006                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7007                 if (r)
7008                         return r;
7009         }
7010
7011 #if defined(CONFIG_DEBUG_FS)
7012         connector_debugfs_init(amdgpu_dm_connector);
7013 #endif
7014
7015         return 0;
7016 }
7017
7018 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7019         .reset = amdgpu_dm_connector_funcs_reset,
7020         .detect = amdgpu_dm_connector_detect,
7021         .fill_modes = drm_helper_probe_single_connector_modes,
7022         .destroy = amdgpu_dm_connector_destroy,
7023         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7024         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7025         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7026         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7027         .late_register = amdgpu_dm_connector_late_register,
7028         .early_unregister = amdgpu_dm_connector_unregister
7029 };
7030
7031 static int get_modes(struct drm_connector *connector)
7032 {
7033         return amdgpu_dm_connector_get_modes(connector);
7034 }
7035
7036 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7037 {
7038         struct dc_sink_init_data init_params = {
7039                         .link = aconnector->dc_link,
7040                         .sink_signal = SIGNAL_TYPE_VIRTUAL
7041         };
7042         struct edid *edid;
7043
7044         if (!aconnector->base.edid_blob_ptr) {
7045                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7046                                 aconnector->base.name);
7047
7048                 aconnector->base.force = DRM_FORCE_OFF;
7049                 aconnector->base.override_edid = false;
7050                 return;
7051         }
7052
7053         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7054
7055         aconnector->edid = edid;
7056
7057         aconnector->dc_em_sink = dc_link_add_remote_sink(
7058                 aconnector->dc_link,
7059                 (uint8_t *)edid,
7060                 (edid->extensions + 1) * EDID_LENGTH,
7061                 &init_params);
7062
7063         if (aconnector->base.force == DRM_FORCE_ON) {
7064                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7065                 aconnector->dc_link->local_sink :
7066                 aconnector->dc_em_sink;
7067                 dc_sink_retain(aconnector->dc_sink);
7068         }
7069 }
7070
7071 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7072 {
7073         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7074
7075         /*
7076          * In case of a headless boot with force-on for a DP managed connector,
7077          * these settings have to be != 0 to get an initial modeset.
7078          */
7079         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7080                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7081                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7082         }
7083
7085         aconnector->base.override_edid = true;
7086         create_eml_sink(aconnector);
7087 }
7088
7089 struct dc_stream_state *
7090 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7091                                 const struct drm_display_mode *drm_mode,
7092                                 const struct dm_connector_state *dm_state,
7093                                 const struct dc_stream_state *old_stream)
7094 {
7095         struct drm_connector *connector = &aconnector->base;
7096         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7097         struct dc_stream_state *stream;
7098         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7099         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7100         enum dc_status dc_result = DC_OK;
7101
7102         do {
7103                 stream = create_stream_for_sink(aconnector, drm_mode,
7104                                                 dm_state, old_stream,
7105                                                 requested_bpc);
7106                 if (stream == NULL) {
7107                         DRM_ERROR("Failed to create stream for sink!\n");
7108                         break;
7109                 }
7110
7111                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7112
7113                 if (dc_result != DC_OK) {
7114                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7115                                       drm_mode->hdisplay,
7116                                       drm_mode->vdisplay,
7117                                       drm_mode->clock,
7118                                       dc_result,
7119                                       dc_status_to_str(dc_result));
7120
7121                         dc_stream_release(stream);
7122                         stream = NULL;
7123                         requested_bpc -= 2; /* lower bpc to retry validation */
7124                 }
7125
7126         } while (stream == NULL && requested_bpc >= 6);
7127
7128         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7129                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7130
7131                 aconnector->force_yuv420_output = true;
7132                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7133                                                 dm_state, old_stream);
7134                 aconnector->force_yuv420_output = false;
7135         }
7136
7137         return stream;
7138 }
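
/*
 * Validation fallback above, traced for a hypothetical sink with
 * max_requested_bpc == 10 (illustrative only):
 *
 *	try bpc = 10  -> dc_validate_stream() fails
 *	try bpc = 8   -> fails
 *	try bpc = 6   -> fails, loop exits (next bpc would be < 6)
 *
 * If the last failure was DC_FAIL_ENC_VALIDATE and YCbCr420 was not already
 * forced, the function recurses once with force_yuv420_output = true and
 * walks the same 10/8/6 ladder in that format.
 */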
7139
7140 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7141                                    struct drm_display_mode *mode)
7142 {
7143         int result = MODE_ERROR;
7144         struct dc_sink *dc_sink;
7145         /* TODO: Unhardcode stream count */
7146         struct dc_stream_state *stream;
7147         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7148
7149         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7150                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7151                 return result;
7152
7153         /*
7154          * Only run this the first time mode_valid is called, to initialize
7155          * EDID management.
7156          */
7157         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7158                 !aconnector->dc_em_sink)
7159                 handle_edid_mgmt(aconnector);
7160
7161         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7162
7163         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7164                                 aconnector->base.force != DRM_FORCE_ON) {
7165                 DRM_ERROR("dc_sink is NULL!\n");
7166                 goto fail;
7167         }
7168
7169         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7170         if (stream) {
7171                 dc_stream_release(stream);
7172                 result = MODE_OK;
7173         }
7174
7175 fail:
7176         /* TODO: error handling */
7177         return result;
7178 }
7179
7180 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7181                                 struct dc_info_packet *out)
7182 {
7183         struct hdmi_drm_infoframe frame;
7184         unsigned char buf[30]; /* 26 + 4 */
7185         ssize_t len;
7186         int ret, i;
7187
7188         memset(out, 0, sizeof(*out));
7189
7190         if (!state->hdr_output_metadata)
7191                 return 0;
7192
7193         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7194         if (ret)
7195                 return ret;
7196
7197         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7198         if (len < 0)
7199                 return (int)len;
7200
7201         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7202         if (len != 30)
7203                 return -EINVAL;
7204
7205         /* Prepare the infopacket for DC. */
7206         switch (state->connector->connector_type) {
7207         case DRM_MODE_CONNECTOR_HDMIA:
7208                 out->hb0 = 0x87; /* type */
7209                 out->hb1 = 0x01; /* version */
7210                 out->hb2 = 0x1A; /* length */
7211                 out->sb[0] = buf[3]; /* checksum */
7212                 i = 1;
7213                 break;
7214
7215         case DRM_MODE_CONNECTOR_DisplayPort:
7216         case DRM_MODE_CONNECTOR_eDP:
7217                 out->hb0 = 0x00; /* sdp id, zero */
7218                 out->hb1 = 0x87; /* type */
7219                 out->hb2 = 0x1D; /* payload len - 1 */
7220                 out->hb3 = (0x13 << 2); /* sdp version */
7221                 out->sb[0] = 0x01; /* version */
7222                 out->sb[1] = 0x1A; /* length */
7223                 i = 2;
7224                 break;
7225
7226         default:
7227                 return -EINVAL;
7228         }
7229
7230         memcpy(&out->sb[i], &buf[4], 26);
7231         out->valid = true;
7232
7233         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7234                        sizeof(out->sb), false);
7235
7236         return 0;
7237 }
7238
7239 static int
7240 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7241                                  struct drm_atomic_state *state)
7242 {
7243         struct drm_connector_state *new_con_state =
7244                 drm_atomic_get_new_connector_state(state, conn);
7245         struct drm_connector_state *old_con_state =
7246                 drm_atomic_get_old_connector_state(state, conn);
7247         struct drm_crtc *crtc = new_con_state->crtc;
7248         struct drm_crtc_state *new_crtc_state;
7249         int ret;
7250
7251         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7252
7253         if (!crtc)
7254                 return 0;
7255
7256         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7257                 struct dc_info_packet hdr_infopacket;
7258
7259                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7260                 if (ret)
7261                         return ret;
7262
7263                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7264                 if (IS_ERR(new_crtc_state))
7265                         return PTR_ERR(new_crtc_state);
7266
7267                 /*
7268                  * DC considers the stream backends changed if the
7269                  * static metadata changes. Forcing the modeset also
7270                  * gives a simple way for userspace to switch from
7271                  * 8bpc to 10bpc when setting the metadata to enter
7272                  * or exit HDR.
7273                  *
7274                  * Changing the static metadata after it's been
7275                  * set is permissible, however. So only force a
7276                  * modeset if we're entering or exiting HDR.
7277                  */
7278                 new_crtc_state->mode_changed =
7279                         !old_con_state->hdr_output_metadata ||
7280                         !new_con_state->hdr_output_metadata;
7281         }
7282
7283         return 0;
7284 }
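
/*
 * A hedged userspace sketch of "entering HDR" as described above: create an
 * HDR_OUTPUT_METADATA blob and attach it in an atomic commit. This uses
 * libdrm with constants mirrored from linux/hdmi.h; "fd", "req", "conn_id"
 * and "hdr_prop_id" are assumptions of the example:
 *
 *	struct hdr_output_metadata hdr = {
 *		.metadata_type = HDMI_STATIC_METADATA_TYPE1,
 *		.hdmi_metadata_type1 = {
 *			.eotf = HDMI_EOTF_SMPTE_ST2084,
 *			.metadata_type = HDMI_STATIC_METADATA_TYPE1,
 *			.max_cll = 1000,
 *			.max_fall = 400,
 *		},
 *	};
 *	uint32_t blob_id;
 *
 *	drmModeCreatePropertyBlob(fd, &hdr, sizeof(hdr), &blob_id);
 *	drmModeAtomicAddProperty(req, conn_id, hdr_prop_id, blob_id);
 *	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
 *
 * Because old_con_state->hdr_output_metadata was NULL, the check above sets
 * mode_changed and the commit becomes a full modeset.
 */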
7285
7286 static const struct drm_connector_helper_funcs
7287 amdgpu_dm_connector_helper_funcs = {
7288         /*
7289          * When hotplugging a second, larger display in FB console mode, the
7290          * higher-resolution modes are filtered out by drm_mode_validate_size()
7291          * and are missing after the user starts lightdm. So we need to renew
7292          * the mode list in the get_modes callback, not just return the mode count.
7293          */
7294         .get_modes = get_modes,
7295         .mode_valid = amdgpu_dm_connector_mode_valid,
7296         .atomic_check = amdgpu_dm_connector_atomic_check,
7297 };
7298
7299 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7300 {
7301 }
7302
7303 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7304 {
7305         struct drm_atomic_state *state = new_crtc_state->state;
7306         struct drm_plane *plane;
7307         int num_active = 0;
7308
7309         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7310                 struct drm_plane_state *new_plane_state;
7311
7312                 /* Cursor planes are "fake". */
7313                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7314                         continue;
7315
7316                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7317
7318                 if (!new_plane_state) {
7319                         /*
7320                          * The plane is enabled on the CRTC and hasn't changed
7321                          * state. This means that it previously passed
7322                          * validation and is therefore enabled.
7323                          */
7324                         num_active += 1;
7325                         continue;
7326                 }
7327
7328                 /* We need a framebuffer to be considered enabled. */
7329                 num_active += (new_plane_state->fb != NULL);
7330         }
7331
7332         return num_active;
7333 }
7334
7335 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7336                                          struct drm_crtc_state *new_crtc_state)
7337 {
7338         struct dm_crtc_state *dm_new_crtc_state =
7339                 to_dm_crtc_state(new_crtc_state);
7340
7341         dm_new_crtc_state->active_planes = 0;
7342
7343         if (!dm_new_crtc_state->stream)
7344                 return;
7345
7346         dm_new_crtc_state->active_planes =
7347                 count_crtc_active_planes(new_crtc_state);
7348 }
7349
7350 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7351                                        struct drm_atomic_state *state)
7352 {
7353         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7354                                                                           crtc);
7355         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7356         struct dc *dc = adev->dm.dc;
7357         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7358         int ret = -EINVAL;
7359
7360         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7361
7362         dm_update_crtc_active_planes(crtc, crtc_state);
7363
7364         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7365                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7366                 return ret;
7367         }
7368
7369         /*
7370          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7371          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7372          * planes are disabled, which is not supported by the hardware. And there is legacy
7373          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7374          */
7375         if (crtc_state->enable &&
7376             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7377                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7378                 return -EINVAL;
7379         }
7380
7381         /* In some use cases, like reset, no stream is attached */
7382         if (!dm_crtc_state->stream)
7383                 return 0;
7384
7385         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7386                 return 0;
7387
7388         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7389         return ret;
7390 }
7391
7392 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7393                                       const struct drm_display_mode *mode,
7394                                       struct drm_display_mode *adjusted_mode)
7395 {
7396         return true;
7397 }
7398
7399 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7400         .disable = dm_crtc_helper_disable,
7401         .atomic_check = dm_crtc_helper_atomic_check,
7402         .mode_fixup = dm_crtc_helper_mode_fixup,
7403         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7404 };
7405
7406 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7407 {
7409 }
7410
7411 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7412 {
7413         switch (display_color_depth) {
7414         case COLOR_DEPTH_666:
7415                 return 6;
7416         case COLOR_DEPTH_888:
7417                 return 8;
7418         case COLOR_DEPTH_101010:
7419                 return 10;
7420         case COLOR_DEPTH_121212:
7421                 return 12;
7422         case COLOR_DEPTH_141414:
7423                 return 14;
7424         case COLOR_DEPTH_161616:
7425                 return 16;
7426         default:
7427                 break;
7428         }
7429         return 0;
7430 }
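
/*
 * Example of how the helper above feeds the MST bandwidth math in
 * dm_encoder_helper_atomic_check() below (values are illustrative):
 *
 *	bpp = convert_dc_color_depth_into_bpc(COLOR_DEPTH_101010) * 3; // 30
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
 *
 * i.e. a 10 bpc stream occupies 30 bits per pixel, and the PBN derived from
 * pixel clock and bpp is what the VCPI slot allocation is based on.
 */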
7431
7432 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7433                                           struct drm_crtc_state *crtc_state,
7434                                           struct drm_connector_state *conn_state)
7435 {
7436         struct drm_atomic_state *state = crtc_state->state;
7437         struct drm_connector *connector = conn_state->connector;
7438         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7439         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7440         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7441         struct drm_dp_mst_topology_mgr *mst_mgr;
7442         struct drm_dp_mst_port *mst_port;
7443         enum dc_color_depth color_depth;
7444         int clock, bpp = 0;
7445         bool is_y420 = false;
7446
7447         if (!aconnector->port || !aconnector->dc_sink)
7448                 return 0;
7449
7450         mst_port = aconnector->port;
7451         mst_mgr = &aconnector->mst_port->mst_mgr;
7452
7453         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7454                 return 0;
7455
7456         if (!state->duplicated) {
7457                 int max_bpc = conn_state->max_requested_bpc;
7458                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7459                                 aconnector->force_yuv420_output;
7460                 color_depth = convert_color_depth_from_display_info(connector,
7461                                                                     is_y420,
7462                                                                     max_bpc);
7463                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7464                 clock = adjusted_mode->clock;
7465                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7466         }
7467         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7468                                                                            mst_mgr,
7469                                                                            mst_port,
7470                                                                            dm_new_connector_state->pbn,
7471                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7472         if (dm_new_connector_state->vcpi_slots < 0) {
7473                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7474                 return dm_new_connector_state->vcpi_slots;
7475         }
7476         return 0;
7477 }
7478
7479 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7480         .disable = dm_encoder_helper_disable,
7481         .atomic_check = dm_encoder_helper_atomic_check
7482 };
7483
7484 #if defined(CONFIG_DRM_AMD_DC_DCN)
7485 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7486                                             struct dc_state *dc_state,
7487                                             struct dsc_mst_fairness_vars *vars)
7488 {
7489         struct dc_stream_state *stream = NULL;
7490         struct drm_connector *connector;
7491         struct drm_connector_state *new_con_state;
7492         struct amdgpu_dm_connector *aconnector;
7493         struct dm_connector_state *dm_conn_state;
7494         int i, j;
7495         int vcpi, pbn_div, pbn, slot_num = 0;
7496
7497         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7499                 aconnector = to_amdgpu_dm_connector(connector);
7500
7501                 if (!aconnector->port)
7502                         continue;
7503
7504                 if (!new_con_state || !new_con_state->crtc)
7505                         continue;
7506
7507                 dm_conn_state = to_dm_connector_state(new_con_state);
7508
7509                 for (j = 0; j < dc_state->stream_count; j++) {
7510                         stream = dc_state->streams[j];
7511                         if (!stream)
7512                                 continue;
7513
7514                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7515                                 break;
7516
7517                         stream = NULL;
7518                 }
7519
7520                 if (!stream)
7521                         continue;
7522
7523                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7524                 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7525                 for (j = 0; j < dc_state->stream_count; j++) {
7526                         if (vars[j].aconnector == aconnector) {
7527                                 pbn = vars[j].pbn;
7528                                 break;
7529                         }
7530                 }
7531
7532                 if (j == dc_state->stream_count)
7533                         continue;
7534
7535                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7536
7537                 if (stream->timing.flags.DSC != 1) {
7538                         dm_conn_state->pbn = pbn;
7539                         dm_conn_state->vcpi_slots = slot_num;
7540
7541                         drm_dp_mst_atomic_enable_dsc(state,
7542                                                      aconnector->port,
7543                                                      dm_conn_state->pbn,
7544                                                      0,
7545                                                      false);
7546                         continue;
7547                 }
7548
7549                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7550                                                     aconnector->port,
7551                                                     pbn, pbn_div,
7552                                                     true);
7553                 if (vcpi < 0)
7554                         return vcpi;
7555
7556                 dm_conn_state->pbn = pbn;
7557                 dm_conn_state->vcpi_slots = vcpi;
7558         }
7559         return 0;
7560 }
7561 #endif
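
/*
 * Worked example for the slot math above (illustrative values): an HBR2 x4
 * link carries 2560 PBN across its 64 time slots, so dm_mst_get_pbn_divider()
 * yields 40 PBN per slot. For a stream that
 * compute_mst_dsc_configs_for_state() sized at pbn == 540:
 *
 *	slot_num = DIV_ROUND_UP(540, 40);	// 14 slots
 *
 * That is the vcpi_slots value recorded for a stream not using DSC; DSC
 * streams instead use the VCPI returned by drm_dp_mst_atomic_enable_dsc().
 */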
7562
7563 static void dm_drm_plane_reset(struct drm_plane *plane)
7564 {
7565         struct dm_plane_state *amdgpu_state = NULL;
7566
7567         if (plane->state)
7568                 plane->funcs->atomic_destroy_state(plane, plane->state);
7569
7570         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7571         WARN_ON(amdgpu_state == NULL);
7572
7573         if (amdgpu_state)
7574                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7575 }
7576
7577 static struct drm_plane_state *
7578 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7579 {
7580         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7581
7582         old_dm_plane_state = to_dm_plane_state(plane->state);
7583         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7584         if (!dm_plane_state)
7585                 return NULL;
7586
7587         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7588
7589         if (old_dm_plane_state->dc_state) {
7590                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7591                 dc_plane_state_retain(dm_plane_state->dc_state);
7592         }
7593
7594         return &dm_plane_state->base;
7595 }
7596
7597 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7598                                 struct drm_plane_state *state)
7599 {
7600         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7601
7602         if (dm_plane_state->dc_state)
7603                 dc_plane_state_release(dm_plane_state->dc_state);
7604
7605         drm_atomic_helper_plane_destroy_state(plane, state);
7606 }
7607
7608 static const struct drm_plane_funcs dm_plane_funcs = {
7609         .update_plane   = drm_atomic_helper_update_plane,
7610         .disable_plane  = drm_atomic_helper_disable_plane,
7611         .destroy        = drm_primary_helper_destroy,
7612         .reset = dm_drm_plane_reset,
7613         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7614         .atomic_destroy_state = dm_drm_plane_destroy_state,
7615         .format_mod_supported = dm_plane_format_mod_supported,
7616 };
7617
7618 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7619                                       struct drm_plane_state *new_state)
7620 {
7621         struct amdgpu_framebuffer *afb;
7622         struct drm_gem_object *obj;
7623         struct amdgpu_device *adev;
7624         struct amdgpu_bo *rbo;
7625         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7626         uint32_t domain;
7627         int r;
7628
7629         if (!new_state->fb) {
7630                 DRM_DEBUG_KMS("No FB bound\n");
7631                 return 0;
7632         }
7633
7634         afb = to_amdgpu_framebuffer(new_state->fb);
7635         obj = new_state->fb->obj[0];
7636         rbo = gem_to_amdgpu_bo(obj);
7637         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7638
7639         r = amdgpu_bo_reserve(rbo, true);
7640         if (r) {
7641                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7642                 return r;
7643         }
7644
7645         r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7646         if (r) {
7647                 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7648                 goto error_unlock;
7649         }
7650
7651         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7652                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7653         else
7654                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7655
7656         r = amdgpu_bo_pin(rbo, domain);
7657         if (unlikely(r != 0)) {
7658                 if (r != -ERESTARTSYS)
7659                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7660                 goto error_unlock;
7661         }
7662
7663         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7664         if (unlikely(r != 0)) {
7665                 DRM_ERROR("%p bind failed\n", rbo);
7666                 goto error_unpin;
7667         }
7668
7669         amdgpu_bo_unreserve(rbo);
7670
7671         afb->address = amdgpu_bo_gpu_offset(rbo);
7672
7673         amdgpu_bo_ref(rbo);
7674
7675         /*
7676          * We don't do surface updates on planes that have been newly created,
7677          * but we also don't have the afb->address during atomic check.
7678          *
7679          * Fill in buffer attributes depending on the address here, but only on
7680          * newly created planes since they're not being used by DC yet and this
7681          * won't modify global state.
7682          */
7683         dm_plane_state_old = to_dm_plane_state(plane->state);
7684         dm_plane_state_new = to_dm_plane_state(new_state);
7685
7686         if (dm_plane_state_new->dc_state &&
7687             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7688                 struct dc_plane_state *plane_state =
7689                         dm_plane_state_new->dc_state;
7690                 bool force_disable_dcc = !plane_state->dcc.enable;
7691
7692                 fill_plane_buffer_attributes(
7693                         adev, afb, plane_state->format, plane_state->rotation,
7694                         afb->tiling_flags,
7695                         &plane_state->tiling_info, &plane_state->plane_size,
7696                         &plane_state->dcc, &plane_state->address,
7697                         afb->tmz_surface, force_disable_dcc);
7698         }
7699
7700         return 0;
7701
7702 error_unpin:
7703         amdgpu_bo_unpin(rbo);
7704
7705 error_unlock:
7706         amdgpu_bo_unreserve(rbo);
7707         return r;
7708 }
7709
7710 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7711                                        struct drm_plane_state *old_state)
7712 {
7713         struct amdgpu_bo *rbo;
7714         int r;
7715
7716         if (!old_state->fb)
7717                 return;
7718
7719         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7720         r = amdgpu_bo_reserve(rbo, false);
7721         if (unlikely(r)) {
7722                 DRM_ERROR("failed to reserve rbo before unpin\n");
7723                 return;
7724         }
7725
7726         amdgpu_bo_unpin(rbo);
7727         amdgpu_bo_unreserve(rbo);
7728         amdgpu_bo_unref(&rbo);
7729 }
7730
7731 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7732                                        struct drm_crtc_state *new_crtc_state)
7733 {
7734         struct drm_framebuffer *fb = state->fb;
7735         int min_downscale, max_upscale;
7736         int min_scale = 0;
7737         int max_scale = INT_MAX;
7738
7739         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7740         if (fb && state->crtc) {
7741                 /* Validate viewport to cover the case when only the position changes */
7742                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7743                         int viewport_width = state->crtc_w;
7744                         int viewport_height = state->crtc_h;
7745
7746                         if (state->crtc_x < 0)
7747                                 viewport_width += state->crtc_x;
7748                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7749                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7750
7751                         if (state->crtc_y < 0)
7752                                 viewport_height += state->crtc_y;
7753                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7754                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7755
7756                         if (viewport_width < 0 || viewport_height < 0) {
7757                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7758                                 return -EINVAL;
7759                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7760                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7761                                 return -EINVAL;
7762                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7763                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7764                                 return -EINVAL;
7765                         }
7766
7767                 }
7768
7769                 /* Get min/max allowed scaling factors from plane caps. */
7770                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7771                                              &min_downscale, &max_upscale);
7772                 /*
7773                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7774                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7775                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7776                  */
7777                 min_scale = (1000 << 16) / max_upscale;
7778                 max_scale = (1000 << 16) / min_downscale;
7779         }
7780
7781         return drm_atomic_helper_check_plane_state(
7782                 state, new_crtc_state, min_scale, max_scale, true, true);
7783 }
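
/*
 * Worked example for the fixed-point conversion above: if a plane reports
 * max_upscale = 16000 and min_downscale = 250 in dc's 1.0 == 1000 convention
 * (16x up, 4x down; illustrative caps), then
 *
 *	min_scale = (1000 << 16) / 16000;	// 4096   == 1/16 in 16.16
 *	max_scale = (1000 << 16) / 250;		// 262144 == 4.0  in 16.16
 *
 * matching drm's src/dst convention, where a scale below 1.0 means the source
 * is smaller than the destination (i.e. upscaling).
 */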
7784
7785 static int dm_plane_atomic_check(struct drm_plane *plane,
7786                                  struct drm_atomic_state *state)
7787 {
7788         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7789                                                                                  plane);
7790         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7791         struct dc *dc = adev->dm.dc;
7792         struct dm_plane_state *dm_plane_state;
7793         struct dc_scaling_info scaling_info;
7794         struct drm_crtc_state *new_crtc_state;
7795         int ret;
7796
7797         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7798
7799         dm_plane_state = to_dm_plane_state(new_plane_state);
7800
7801         if (!dm_plane_state->dc_state)
7802                 return 0;
7803
7804         new_crtc_state =
7805                 drm_atomic_get_new_crtc_state(state,
7806                                               new_plane_state->crtc);
7807         if (!new_crtc_state)
7808                 return -EINVAL;
7809
7810         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7811         if (ret)
7812                 return ret;
7813
7814         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7815         if (ret)
7816                 return ret;
7817
7818         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7819                 return 0;
7820
7821         return -EINVAL;
7822 }
7823
7824 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7825                                        struct drm_atomic_state *state)
7826 {
7827         /* Only support async updates on cursor planes. */
7828         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7829                 return -EINVAL;
7830
7831         return 0;
7832 }
7833
7834 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7835                                          struct drm_atomic_state *state)
7836 {
7837         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7838                                                                            plane);
7839         struct drm_plane_state *old_state =
7840                 drm_atomic_get_old_plane_state(state, plane);
7841
7842         trace_amdgpu_dm_atomic_update_cursor(new_state);
7843
7844         swap(plane->state->fb, new_state->fb);
7845
7846         plane->state->src_x = new_state->src_x;
7847         plane->state->src_y = new_state->src_y;
7848         plane->state->src_w = new_state->src_w;
7849         plane->state->src_h = new_state->src_h;
7850         plane->state->crtc_x = new_state->crtc_x;
7851         plane->state->crtc_y = new_state->crtc_y;
7852         plane->state->crtc_w = new_state->crtc_w;
7853         plane->state->crtc_h = new_state->crtc_h;
7854
7855         handle_cursor_update(plane, old_state);
7856 }
7857
7858 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7859         .prepare_fb = dm_plane_helper_prepare_fb,
7860         .cleanup_fb = dm_plane_helper_cleanup_fb,
7861         .atomic_check = dm_plane_atomic_check,
7862         .atomic_async_check = dm_plane_atomic_async_check,
7863         .atomic_async_update = dm_plane_atomic_async_update
7864 };
7865
7866 /*
7867  * TODO: these are currently initialized to rgb formats only.
7868  * For future use cases we should either initialize them dynamically based on
7869  * plane capabilities, or initialize this array to all formats, so the internal
7870  * drm check will succeed, and let DC implement the proper check.
7871  */
7872 static const uint32_t rgb_formats[] = {
7873         DRM_FORMAT_XRGB8888,
7874         DRM_FORMAT_ARGB8888,
7875         DRM_FORMAT_RGBA8888,
7876         DRM_FORMAT_XRGB2101010,
7877         DRM_FORMAT_XBGR2101010,
7878         DRM_FORMAT_ARGB2101010,
7879         DRM_FORMAT_ABGR2101010,
7880         DRM_FORMAT_XRGB16161616,
7881         DRM_FORMAT_XBGR16161616,
7882         DRM_FORMAT_ARGB16161616,
7883         DRM_FORMAT_ABGR16161616,
7884         DRM_FORMAT_XBGR8888,
7885         DRM_FORMAT_ABGR8888,
7886         DRM_FORMAT_RGB565,
7887 };
7888
7889 static const uint32_t overlay_formats[] = {
7890         DRM_FORMAT_XRGB8888,
7891         DRM_FORMAT_ARGB8888,
7892         DRM_FORMAT_RGBA8888,
7893         DRM_FORMAT_XBGR8888,
7894         DRM_FORMAT_ABGR8888,
7895         DRM_FORMAT_RGB565
7896 };
7897
7898 static const u32 cursor_formats[] = {
7899         DRM_FORMAT_ARGB8888
7900 };
7901
7902 static int get_plane_formats(const struct drm_plane *plane,
7903                              const struct dc_plane_cap *plane_cap,
7904                              uint32_t *formats, int max_formats)
7905 {
7906         int i, num_formats = 0;
7907
7908         /*
7909          * TODO: Query support for each group of formats directly from
7910          * DC plane caps. This will require adding more formats to the
7911          * caps list.
7912          */
7913
7914         switch (plane->type) {
7915         case DRM_PLANE_TYPE_PRIMARY:
7916                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7917                         if (num_formats >= max_formats)
7918                                 break;
7919
7920                         formats[num_formats++] = rgb_formats[i];
7921                 }
7922
7923                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7924                         formats[num_formats++] = DRM_FORMAT_NV12;
7925                 if (plane_cap && plane_cap->pixel_format_support.p010)
7926                         formats[num_formats++] = DRM_FORMAT_P010;
7927                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7928                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7929                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7930                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7931                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7932                 }
7933                 break;
7934
7935         case DRM_PLANE_TYPE_OVERLAY:
7936                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7937                         if (num_formats >= max_formats)
7938                                 break;
7939
7940                         formats[num_formats++] = overlay_formats[i];
7941                 }
7942                 break;
7943
7944         case DRM_PLANE_TYPE_CURSOR:
7945                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7946                         if (num_formats >= max_formats)
7947                                 break;
7948
7949                         formats[num_formats++] = cursor_formats[i];
7950                 }
7951                 break;
7952         }
7953
7954         return num_formats;
7955 }
7956
7957 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7958                                 struct drm_plane *plane,
7959                                 unsigned long possible_crtcs,
7960                                 const struct dc_plane_cap *plane_cap)
7961 {
7962         uint32_t formats[32];
7963         int num_formats;
7964         int res;
7965         unsigned int supported_rotations;
7966         uint64_t *modifiers = NULL;
7967
7968         num_formats = get_plane_formats(plane, plane_cap, formats,
7969                                         ARRAY_SIZE(formats));
7970
7971         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7972         if (res)
7973                 return res;
7974
7975         if (modifiers == NULL)
7976                 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7977
7978         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7979                                        &dm_plane_funcs, formats, num_formats,
7980                                        modifiers, plane->type, NULL);
7981         kfree(modifiers);
7982         if (res)
7983                 return res;
7984
7985         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7986             plane_cap && plane_cap->per_pixel_alpha) {
7987                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7988                                           BIT(DRM_MODE_BLEND_PREMULTI) |
7989                                           BIT(DRM_MODE_BLEND_COVERAGE);
7990
7991                 drm_plane_create_alpha_property(plane);
7992                 drm_plane_create_blend_mode_property(plane, blend_caps);
7993         }
7994
7995         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7996             plane_cap &&
7997             (plane_cap->pixel_format_support.nv12 ||
7998              plane_cap->pixel_format_support.p010)) {
7999                 /* This only affects YUV formats. */
8000                 drm_plane_create_color_properties(
8001                         plane,
8002                         BIT(DRM_COLOR_YCBCR_BT601) |
8003                         BIT(DRM_COLOR_YCBCR_BT709) |
8004                         BIT(DRM_COLOR_YCBCR_BT2020),
8005                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8006                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8007                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8008         }
8009
8010         supported_rotations =
8011                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8012                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8013
8014         if (dm->adev->asic_type >= CHIP_BONAIRE &&
8015             plane->type != DRM_PLANE_TYPE_CURSOR)
8016                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8017                                                    supported_rotations);
8018
8019         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8020
8021         /* Create (reset) the plane state */
8022         if (plane->funcs->reset)
8023                 plane->funcs->reset(plane);
8024
8025         return 0;
8026 }
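/*
 * Illustrative sketch (assumed caller, not part of the driver): a plane is
 * typically allocated zeroed, tagged with its type, and then handed to
 * amdgpu_dm_plane_init(), exactly as amdgpu_dm_crtc_init() below does for the
 * cursor plane. Note that the modifier list allocated by get_plane_modifiers()
 * is copied by drm_universal_plane_init() and freed here, so callers never
 * own it.
 *
 *	struct drm_plane *plane = kzalloc(sizeof(*plane), GFP_KERNEL);
 *	int ret;
 *
 *	if (!plane)
 *		return -ENOMEM;
 *	plane->type = DRM_PLANE_TYPE_OVERLAY;
 *	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
 *	if (ret) {
 *		kfree(plane);
 *		return ret;
 *	}
 */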
8027
8028 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8029                                struct drm_plane *plane,
8030                                uint32_t crtc_index)
8031 {
8032         struct amdgpu_crtc *acrtc = NULL;
8033         struct drm_plane *cursor_plane;
8034
8035         int res = -ENOMEM;
8036
8037         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8038         if (!cursor_plane)
8039                 goto fail;
8040
8041         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8042         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
8043
8044         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8045         if (!acrtc)
8046                 goto fail;
8047
8048         res = drm_crtc_init_with_planes(
8049                         dm->ddev,
8050                         &acrtc->base,
8051                         plane,
8052                         cursor_plane,
8053                         &amdgpu_dm_crtc_funcs, NULL);
8054
8055         if (res)
8056                 goto fail;
8057
8058         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8059
8060         /* Create (reset) the crtc state */
8061         if (acrtc->base.funcs->reset)
8062                 acrtc->base.funcs->reset(&acrtc->base);
8063
8064         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8065         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8066
8067         acrtc->crtc_id = crtc_index;
8068         acrtc->base.enabled = false;
8069         acrtc->otg_inst = -1;
8070
8071         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8072         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8073                                    true, MAX_COLOR_LUT_ENTRIES);
8074         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8075
8076         return 0;
8077
8078 fail:
8079         kfree(acrtc);
8080         kfree(cursor_plane);
8081         return res;
8082 }
8083
8084
8085 static int to_drm_connector_type(enum signal_type st)
8086 {
8087         switch (st) {
8088         case SIGNAL_TYPE_HDMI_TYPE_A:
8089                 return DRM_MODE_CONNECTOR_HDMIA;
8090         case SIGNAL_TYPE_EDP:
8091                 return DRM_MODE_CONNECTOR_eDP;
8092         case SIGNAL_TYPE_LVDS:
8093                 return DRM_MODE_CONNECTOR_LVDS;
8094         case SIGNAL_TYPE_RGB:
8095                 return DRM_MODE_CONNECTOR_VGA;
8096         case SIGNAL_TYPE_DISPLAY_PORT:
8097         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8098                 return DRM_MODE_CONNECTOR_DisplayPort;
8099         case SIGNAL_TYPE_DVI_DUAL_LINK:
8100         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8101                 return DRM_MODE_CONNECTOR_DVID;
8102         case SIGNAL_TYPE_VIRTUAL:
8103                 return DRM_MODE_CONNECTOR_VIRTUAL;
8104
8105         default:
8106                 return DRM_MODE_CONNECTOR_Unknown;
8107         }
8108 }
8109
8110 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8111 {
8112         struct drm_encoder *encoder;
8113
8114         /* There is only one encoder per connector */
8115         drm_connector_for_each_possible_encoder(connector, encoder)
8116                 return encoder;
8117
8118         return NULL;
8119 }
8120
8121 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8122 {
8123         struct drm_encoder *encoder;
8124         struct amdgpu_encoder *amdgpu_encoder;
8125
8126         encoder = amdgpu_dm_connector_to_encoder(connector);
8127
8128         if (encoder == NULL)
8129                 return;
8130
8131         amdgpu_encoder = to_amdgpu_encoder(encoder);
8132
8133         amdgpu_encoder->native_mode.clock = 0;
8134
8135         if (!list_empty(&connector->probed_modes)) {
8136                 struct drm_display_mode *preferred_mode = NULL;
8137
8138                 list_for_each_entry(preferred_mode,
8139                                     &connector->probed_modes,
8140                                     head) {
8141                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8142                                 amdgpu_encoder->native_mode = *preferred_mode;
8143
8144                         break;
8145                 }
8146
8147         }
8148 }
8149
8150 static struct drm_display_mode *
8151 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8152                              char *name,
8153                              int hdisplay, int vdisplay)
8154 {
8155         struct drm_device *dev = encoder->dev;
8156         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8157         struct drm_display_mode *mode = NULL;
8158         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8159
8160         mode = drm_mode_duplicate(dev, native_mode);
8161
8162         if (mode == NULL)
8163                 return NULL;
8164
8165         mode->hdisplay = hdisplay;
8166         mode->vdisplay = vdisplay;
8167         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8168         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8169
8170         return mode;
8171
8172 }
8173
8174 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8175                                                  struct drm_connector *connector)
8176 {
8177         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8178         struct drm_display_mode *mode = NULL;
8179         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8180         struct amdgpu_dm_connector *amdgpu_dm_connector =
8181                                 to_amdgpu_dm_connector(connector);
8182         int i;
8183         int n;
8184         struct mode_size {
8185                 char name[DRM_DISPLAY_MODE_LEN];
8186                 int w;
8187                 int h;
8188         } common_modes[] = {
8189                 {  "640x480",  640,  480},
8190                 {  "800x600",  800,  600},
8191                 { "1024x768", 1024,  768},
8192                 { "1280x720", 1280,  720},
8193                 { "1280x800", 1280,  800},
8194                 {"1280x1024", 1280, 1024},
8195                 { "1440x900", 1440,  900},
8196                 {"1680x1050", 1680, 1050},
8197                 {"1600x1200", 1600, 1200},
8198                 {"1920x1080", 1920, 1080},
8199                 {"1920x1200", 1920, 1200}
8200         };
8201
8202         n = ARRAY_SIZE(common_modes);
8203
8204         for (i = 0; i < n; i++) {
8205                 struct drm_display_mode *curmode = NULL;
8206                 bool mode_existed = false;
8207
8208                 if (common_modes[i].w > native_mode->hdisplay ||
8209                     common_modes[i].h > native_mode->vdisplay ||
8210                    (common_modes[i].w == native_mode->hdisplay &&
8211                     common_modes[i].h == native_mode->vdisplay))
8212                         continue;
8213
8214                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8215                         if (common_modes[i].w == curmode->hdisplay &&
8216                             common_modes[i].h == curmode->vdisplay) {
8217                                 mode_existed = true;
8218                                 break;
8219                         }
8220                 }
8221
8222                 if (mode_existed)
8223                         continue;
8224
8225                 mode = amdgpu_dm_create_common_mode(encoder,
8226                                 common_modes[i].name, common_modes[i].w,
8227                                 common_modes[i].h);
8228                 if (!mode)
8229                         continue;
8230
8231                 drm_mode_probed_add(connector, mode);
8232                 amdgpu_dm_connector->num_modes++;
8233         }
8234 }
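/*
 * Worked example (illustrative): with a 1920x1200 native panel, every table
 * entry that fits inside the native resolution and is not identical to it
 * (640x480 up to and including 1920x1080) becomes a candidate; 1920x1200
 * itself is skipped, and candidates already present in probed_modes are
 * dropped by the duplicate scan above.
 */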
8235
8236 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8237 {
8238         struct drm_encoder *encoder;
8239         struct amdgpu_encoder *amdgpu_encoder;
8240         const struct drm_display_mode *native_mode;
8241
8242         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8243             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8244                 return;
8245
8246         encoder = amdgpu_dm_connector_to_encoder(connector);
8247         if (!encoder)
8248                 return;
8249
8250         amdgpu_encoder = to_amdgpu_encoder(encoder);
8251
8252         native_mode = &amdgpu_encoder->native_mode;
8253         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8254                 return;
8255
8256         drm_connector_set_panel_orientation_with_quirk(connector,
8257                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8258                                                        native_mode->hdisplay,
8259                                                        native_mode->vdisplay);
8260 }
8261
8262 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8263                                               struct edid *edid)
8264 {
8265         struct amdgpu_dm_connector *amdgpu_dm_connector =
8266                         to_amdgpu_dm_connector(connector);
8267
8268         if (edid) {
8269                 /* empty probed_modes */
8270                 INIT_LIST_HEAD(&connector->probed_modes);
8271                 amdgpu_dm_connector->num_modes =
8272                                 drm_add_edid_modes(connector, edid);
8273
8274                 /* Sort the probed modes before calling
8275                  * amdgpu_dm_get_native_mode(), since an EDID can have
8276                  * more than one preferred mode. Modes that appear
8277                  * later in the probed mode list could be of higher,
8278                  * preferred resolution. For example, 3840x2160 in the
8279                  * base EDID preferred timing and 4096x2160 as the
8280                  * preferred resolution in a DisplayID extension block.
8281                  */
8282                 drm_mode_sort(&connector->probed_modes);
8283                 amdgpu_dm_get_native_mode(connector);
8284
8285                 /* Freesync capabilities are reset by calling
8286                  * drm_add_edid_modes() and need to be
8287                  * restored here.
8288                  */
8289                 amdgpu_dm_update_freesync_caps(connector, edid);
8290
8291                 amdgpu_set_panel_orientation(connector);
8292         } else {
8293                 amdgpu_dm_connector->num_modes = 0;
8294         }
8295 }
8296
8297 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8298                               struct drm_display_mode *mode)
8299 {
8300         struct drm_display_mode *m;
8301
8302         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8303                 if (drm_mode_equal(m, mode))
8304                         return true;
8305         }
8306
8307         return false;
8308 }
8309
8310 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8311 {
8312         const struct drm_display_mode *m;
8313         struct drm_display_mode *new_mode;
8314         uint i;
8315         uint32_t new_modes_count = 0;
8316
8317         /* Standard FPS values
8318          *
8319          * 23.976       - TV/NTSC
8320          * 24           - Cinema
8321          * 25           - TV/PAL
8322          * 29.97        - TV/NTSC
8323          * 30           - TV/NTSC
8324          * 48           - Cinema HFR
8325          * 50           - TV/PAL
8326          * 60           - Commonly used
8327          * 48,72,96,120 - Multiples of 24
8328          */
8329         static const uint32_t common_rates[] = {
8330                 23976, 24000, 25000, 29970, 30000,
8331                 48000, 50000, 60000, 72000, 96000, 120000
8332         };
8333
8334         /*
8335          * Find mode with highest refresh rate with the same resolution
8336          * as the preferred mode. Some monitors report a preferred mode
8337          * with lower resolution than the highest refresh rate supported.
8338          */
8339
8340         m = get_highest_refresh_rate_mode(aconnector, true);
8341         if (!m)
8342                 return 0;
8343
8344         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8345                 uint64_t target_vtotal, target_vtotal_diff;
8346                 uint64_t num, den;
8347
8348                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8349                         continue;
8350
8351                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8352                     common_rates[i] > aconnector->max_vfreq * 1000)
8353                         continue;
8354
8355                 num = (unsigned long long)m->clock * 1000 * 1000;
8356                 den = common_rates[i] * (unsigned long long)m->htotal;
8357                 target_vtotal = div_u64(num, den);
8358                 target_vtotal_diff = target_vtotal - m->vtotal;
8359
8360                 /* Check for illegal modes */
8361                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8362                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8363                     m->vtotal + target_vtotal_diff < m->vsync_end)
8364                         continue;
8365
8366                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8367                 if (!new_mode)
8368                         goto out;
8369
8370                 new_mode->vtotal += (u16)target_vtotal_diff;
8371                 new_mode->vsync_start += (u16)target_vtotal_diff;
8372                 new_mode->vsync_end += (u16)target_vtotal_diff;
8373                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8374                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8375
8376                 if (!is_duplicate_mode(aconnector, new_mode)) {
8377                         drm_mode_probed_add(&aconnector->base, new_mode);
8378                         new_modes_count += 1;
8379                 } else {
8380                         drm_mode_destroy(aconnector->base.dev, new_mode);
                     }
8381         }
8382 out:
8383         return new_modes_count;
8384 }
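/*
 * Worked example (illustrative numbers): a 1920x1080@60 mode with
 * clock = 148500 kHz, htotal = 2200 and vtotal = 1125, retargeted at the
 * 48000 mHz entry:
 *
 *	target_vtotal      = (148500 * 1000 * 1000) / (48000 * 2200) = 1406
 *	target_vtotal_diff = 1406 - 1125 = 281
 *
 * The duplicated mode keeps its pixel clock and stretches vtotal and the
 * vsync lines by 281, refreshing at roughly 48 Hz.
 */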
8385
8386 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8387                                                    struct edid *edid)
8388 {
8389         struct amdgpu_dm_connector *amdgpu_dm_connector =
8390                 to_amdgpu_dm_connector(connector);
8391
8392         if (!edid)
8393                 return;
8394
8395         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8396                 amdgpu_dm_connector->num_modes +=
8397                         add_fs_modes(amdgpu_dm_connector);
8398 }
8399
8400 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8401 {
8402         struct amdgpu_dm_connector *amdgpu_dm_connector =
8403                         to_amdgpu_dm_connector(connector);
8404         struct drm_encoder *encoder;
8405         struct edid *edid = amdgpu_dm_connector->edid;
8406
8407         encoder = amdgpu_dm_connector_to_encoder(connector);
8408
8409         if (!drm_edid_is_valid(edid)) {
8410                 amdgpu_dm_connector->num_modes =
8411                                 drm_add_modes_noedid(connector, 640, 480);
8412         } else {
8413                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8414                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8415                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8416         }
8417         amdgpu_dm_fbc_init(connector);
8418
8419         return amdgpu_dm_connector->num_modes;
8420 }
8421
8422 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8423                                      struct amdgpu_dm_connector *aconnector,
8424                                      int connector_type,
8425                                      struct dc_link *link,
8426                                      int link_index)
8427 {
8428         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8429
8430         /*
8431          * Some of the properties below require access to state, like bpc.
8432          * Allocate some default initial connector state with our reset helper.
8433          */
8434         if (aconnector->base.funcs->reset)
8435                 aconnector->base.funcs->reset(&aconnector->base);
8436
8437         aconnector->connector_id = link_index;
8438         aconnector->dc_link = link;
8439         aconnector->base.interlace_allowed = false;
8440         aconnector->base.doublescan_allowed = false;
8441         aconnector->base.stereo_allowed = false;
8442         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8443         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8444         aconnector->audio_inst = -1;
8445         mutex_init(&aconnector->hpd_lock);
8446
8447         /*
8448          * Configure HPD hot plug support: connector->polled defaults to 0,
8449          * which means HPD hot plug is not supported.
8450          */
8451         switch (connector_type) {
8452         case DRM_MODE_CONNECTOR_HDMIA:
8453                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8454                 aconnector->base.ycbcr_420_allowed =
8455                         link->link_enc->features.hdmi_ycbcr420_supported;
8456                 break;
8457         case DRM_MODE_CONNECTOR_DisplayPort:
8458                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8459                 link->link_enc = link_enc_cfg_get_link_enc(link);
8460                 ASSERT(link->link_enc);
8461                 if (link->link_enc)
8462                         aconnector->base.ycbcr_420_allowed =
8463                                 link->link_enc->features.dp_ycbcr420_supported;
8464                 break;
8465         case DRM_MODE_CONNECTOR_DVID:
8466                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8467                 break;
8468         default:
8469                 break;
8470         }
8471
8472         drm_object_attach_property(&aconnector->base.base,
8473                                 dm->ddev->mode_config.scaling_mode_property,
8474                                 DRM_MODE_SCALE_NONE);
8475
8476         drm_object_attach_property(&aconnector->base.base,
8477                                 adev->mode_info.underscan_property,
8478                                 UNDERSCAN_OFF);
8479         drm_object_attach_property(&aconnector->base.base,
8480                                 adev->mode_info.underscan_hborder_property,
8481                                 0);
8482         drm_object_attach_property(&aconnector->base.base,
8483                                 adev->mode_info.underscan_vborder_property,
8484                                 0);
8485
8486         if (!aconnector->mst_port)
8487                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8488
8489         /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
8490         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8491         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8492
8493         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8494             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8495                 drm_object_attach_property(&aconnector->base.base,
8496                                 adev->mode_info.abm_level_property, 0);
8497         }
8498
8499         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8500             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8501             connector_type == DRM_MODE_CONNECTOR_eDP) {
8502                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8503
8504                 if (!aconnector->mst_port)
8505                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8506
8507 #ifdef CONFIG_DRM_AMD_DC_HDCP
8508                 if (adev->dm.hdcp_workqueue)
8509                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8510 #endif
8511         }
8512 }
8513
8514 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8515                               struct i2c_msg *msgs, int num)
8516 {
8517         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8518         struct ddc_service *ddc_service = i2c->ddc_service;
8519         struct i2c_command cmd;
8520         int i;
8521         int result = -EIO;
8522
8523         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8524
8525         if (!cmd.payloads)
8526                 return result;
8527
8528         cmd.number_of_payloads = num;
8529         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8530         cmd.speed = 100;
8531
8532         for (i = 0; i < num; i++) {
8533                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8534                 cmd.payloads[i].address = msgs[i].addr;
8535                 cmd.payloads[i].length = msgs[i].len;
8536                 cmd.payloads[i].data = msgs[i].buf;
8537         }
8538
8539         if (dc_submit_i2c(
8540                         ddc_service->ctx->dc,
8541                         ddc_service->ddc_pin->hw_info.ddc_channel,
8542                         &cmd))
8543                 result = num;
8544
8545         kfree(cmd.payloads);
8546         return result;
8547 }
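/*
 * Illustrative sketch (hypothetical caller): a standard EDID read issued
 * through the I2C core arrives here as two messages, which become two
 * i2c_payloads, an offset write followed by a block read, routed to DC via
 * dc_submit_i2c():
 *
 *	u8 offset = 0;
 *	u8 edid[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD,
 *		  .len = sizeof(edid), .buf = edid },
 *	};
 *
 *	if (i2c_transfer(&i2c->base, msgs, ARRAY_SIZE(msgs)) != ARRAY_SIZE(msgs))
 *		return -EIO;
 */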
8548
8549 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8550 {
8551         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8552 }
8553
8554 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8555         .master_xfer = amdgpu_dm_i2c_xfer,
8556         .functionality = amdgpu_dm_i2c_func,
8557 };
8558
8559 static struct amdgpu_i2c_adapter *
8560 create_i2c(struct ddc_service *ddc_service,
8561            int link_index)
8563 {
8564         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8565         struct amdgpu_i2c_adapter *i2c;
8566
8567         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8568         if (!i2c)
8569                 return NULL;
8570         i2c->base.owner = THIS_MODULE;
8571         i2c->base.class = I2C_CLASS_DDC;
8572         i2c->base.dev.parent = &adev->pdev->dev;
8573         i2c->base.algo = &amdgpu_dm_i2c_algo;
8574         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8575         i2c_set_adapdata(&i2c->base, i2c);
8576         i2c->ddc_service = ddc_service;
8577         if (i2c->ddc_service->ddc_pin)
8578                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8579
8580         return i2c;
8581 }
8582
8583
8584 /*
8585  * Note: this function assumes that dc_link_detect() was called for the
8586  * dc_link which will be represented by this aconnector.
8587  */
8588 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8589                                     struct amdgpu_dm_connector *aconnector,
8590                                     uint32_t link_index,
8591                                     struct amdgpu_encoder *aencoder)
8592 {
8593         int res = 0;
8594         int connector_type;
8595         struct dc *dc = dm->dc;
8596         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8597         struct amdgpu_i2c_adapter *i2c;
8598
8599         link->priv = aconnector;
8600
8601         DRM_DEBUG_DRIVER("%s()\n", __func__);
8602
8603         i2c = create_i2c(link->ddc, link->link_index);
8604         if (!i2c) {
8605                 DRM_ERROR("Failed to create i2c adapter data\n");
8606                 return -ENOMEM;
8607         }
8608
8609         aconnector->i2c = i2c;
8610         res = i2c_add_adapter(&i2c->base);
8611
8612         if (res) {
8613                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8614                 goto out_free;
8615         }
8616
8617         connector_type = to_drm_connector_type(link->connector_signal);
8618
8619         res = drm_connector_init_with_ddc(
8620                         dm->ddev,
8621                         &aconnector->base,
8622                         &amdgpu_dm_connector_funcs,
8623                         connector_type,
8624                         &i2c->base);
8625
8626         if (res) {
8627                 DRM_ERROR("connector_init failed\n");
8628                 aconnector->connector_id = -1;
8629                 goto out_free;
8630         }
8631
8632         drm_connector_helper_add(
8633                         &aconnector->base,
8634                         &amdgpu_dm_connector_helper_funcs);
8635
8636         amdgpu_dm_connector_init_helper(
8637                 dm,
8638                 aconnector,
8639                 connector_type,
8640                 link,
8641                 link_index);
8642
8643         drm_connector_attach_encoder(
8644                 &aconnector->base, &aencoder->base);
8645
8646         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8647                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8648                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8649
8650 out_free:
8651         if (res) {
8652                 kfree(i2c);
8653                 aconnector->i2c = NULL;
8654         }
8655         return res;
8656 }
8657
8658 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8659 {
8660         switch (adev->mode_info.num_crtc) {
8661         case 1:
8662                 return 0x1;
8663         case 2:
8664                 return 0x3;
8665         case 3:
8666                 return 0x7;
8667         case 4:
8668                 return 0xf;
8669         case 5:
8670                 return 0x1f;
8671         case 6:
8672         default:
8673                 return 0x3f;
8674         }
8675 }
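/*
 * The table above is just (1 << num_crtc) - 1 capped at six CRTCs, i.e.
 * GENMASK(min(num_crtc, 6) - 1, 0); it is spelled out as a switch,
 * presumably for readability.
 */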
8676
8677 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8678                                   struct amdgpu_encoder *aencoder,
8679                                   uint32_t link_index)
8680 {
8681         struct amdgpu_device *adev = drm_to_adev(dev);
8682
8683         int res = drm_encoder_init(dev,
8684                                    &aencoder->base,
8685                                    &amdgpu_dm_encoder_funcs,
8686                                    DRM_MODE_ENCODER_TMDS,
8687                                    NULL);
8688
8689         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8690
8691         if (!res)
8692                 aencoder->encoder_id = link_index;
8693         else
8694                 aencoder->encoder_id = -1;
8695
8696         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8697
8698         return res;
8699 }
8700
8701 static void manage_dm_interrupts(struct amdgpu_device *adev,
8702                                  struct amdgpu_crtc *acrtc,
8703                                  bool enable)
8704 {
8705         /*
8706          * We have no guarantee that the frontend index maps to the same
8707          * backend index - some even map to more than one.
8708          *
8709          * TODO: Use a different interrupt or check DC itself for the mapping.
8710          */
8711         int irq_type =
8712                 amdgpu_display_crtc_idx_to_irq_type(
8713                         adev,
8714                         acrtc->crtc_id);
8715
8716         if (enable) {
8717                 drm_crtc_vblank_on(&acrtc->base);
8718                 amdgpu_irq_get(
8719                         adev,
8720                         &adev->pageflip_irq,
8721                         irq_type);
8722 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8723                 amdgpu_irq_get(
8724                         adev,
8725                         &adev->vline0_irq,
8726                         irq_type);
8727 #endif
8728         } else {
8729 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8730                 amdgpu_irq_put(
8731                         adev,
8732                         &adev->vline0_irq,
8733                         irq_type);
8734 #endif
8735                 amdgpu_irq_put(
8736                         adev,
8737                         &adev->pageflip_irq,
8738                         irq_type);
8739                 drm_crtc_vblank_off(&acrtc->base);
8740         }
8741 }
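/*
 * Note the symmetry above: on enable, vblank is switched on before the
 * pageflip (and, if built in, vline0) interrupt references are taken; on
 * disable those references are dropped in reverse order before vblank is
 * switched off, so teardown mirrors bring-up.
 */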
8742
8743 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8744                                       struct amdgpu_crtc *acrtc)
8745 {
8746         int irq_type =
8747                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8748
8749         /*
8750          * This reads the current state for the IRQ and forcibly reapplies
8751          * the setting to hardware.
8752          */
8753         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8754 }
8755
8756 static bool
8757 is_scaling_state_different(const struct dm_connector_state *dm_state,
8758                            const struct dm_connector_state *old_dm_state)
8759 {
8760         if (dm_state->scaling != old_dm_state->scaling)
8761                 return true;
8762         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8763                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8764                         return true;
8765         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8766                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8767                         return true;
8768         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8769                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8770                 return true;
8771         return false;
8772 }
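/*
 * Example (illustrative): flipping underscan on while both borders are
 * still zero is not treated as a scaling change; enabling it with nonzero
 * hborder and vborder is, as is any border change while the enable state
 * stays the same.
 */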
8773
8774 #ifdef CONFIG_DRM_AMD_DC_HDCP
8775 static bool is_content_protection_different(struct drm_connector_state *state,
8776                                             const struct drm_connector_state *old_state,
8777                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8778 {
8779         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8780         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8781
8782         /* Handle: Type0/1 change */
8783         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8784             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8785                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8786                 return true;
8787         }
8788
8789         /* CP is being re-enabled; ignore this.
8790          *
8791          * Handles:     ENABLED -> DESIRED
8792          */
8793         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8794             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8795                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8796                 return false;
8797         }
8798
8799         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8800          *
8801          * Handles:     UNDESIRED -> ENABLED
8802          */
8803         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8804             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8805                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8806
8807         /* Stream removed and re-enabled
8808          *
8809          * Can sometimes overlap with the HPD case,
8810          * thus set update_hdcp to false to avoid
8811          * setting HDCP multiple times.
8812          *
8813          * Handles:     DESIRED -> DESIRED (Special case)
8814          */
8815         if (!(old_state->crtc && old_state->crtc->enabled) &&
8816                 state->crtc && state->crtc->enabled &&
8817                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8818                 dm_con_state->update_hdcp = false;
8819                 return true;
8820         }
8821
8822         /* Hot-plug, headless s3, dpms
8823          *
8824          * Only start HDCP if the display is connected/enabled.
8825          * update_hdcp flag will be set to false until the next
8826          * HPD comes in.
8827          *
8828          * Handles:     DESIRED -> DESIRED (Special case)
8829          */
8830         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8831             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8832                 dm_con_state->update_hdcp = false;
8833                 return true;
8834         }
8835
8836         /*
8837          * Handles:     UNDESIRED -> UNDESIRED
8838          *              DESIRED -> DESIRED
8839          *              ENABLED -> ENABLED
8840          */
8841         if (old_state->content_protection == state->content_protection)
8842                 return false;
8843
8844         /*
8845          * Handles:     UNDESIRED -> DESIRED
8846          *              DESIRED -> UNDESIRED
8847          *              ENABLED -> UNDESIRED
8848          */
8849         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8850                 return true;
8851
8852         /*
8853          * Handles:     DESIRED -> ENABLED
8854          */
8855         return false;
8856 }
8857
8858 #endif
8859 static void remove_stream(struct amdgpu_device *adev,
8860                           struct amdgpu_crtc *acrtc,
8861                           struct dc_stream_state *stream)
8862 {
8863         /* This is the update-mode case: mark the CRTC disabled and detached from its OTG. */
8864
8865         acrtc->otg_inst = -1;
8866         acrtc->enabled = false;
8867 }
8868
8869 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8870                                struct dc_cursor_position *position)
8871 {
8872         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8873         int x, y;
8874         int xorigin = 0, yorigin = 0;
8875
8876         if (!crtc || !plane->state->fb)
8877                 return 0;
8878
8879         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8880             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8881                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8882                           __func__,
8883                           plane->state->crtc_w,
8884                           plane->state->crtc_h);
8885                 return -EINVAL;
8886         }
8887
8888         x = plane->state->crtc_x;
8889         y = plane->state->crtc_y;
8890
8891         if (x <= -amdgpu_crtc->max_cursor_width ||
8892             y <= -amdgpu_crtc->max_cursor_height)
8893                 return 0;
8894
8895         if (x < 0) {
8896                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8897                 x = 0;
8898         }
8899         if (y < 0) {
8900                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8901                 y = 0;
8902         }
8903         position->enable = true;
8904         position->translate_by_source = true;
8905         position->x = x;
8906         position->y = y;
8907         position->x_hotspot = xorigin;
8908         position->y_hotspot = yorigin;
8909
8910         return 0;
8911 }
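/*
 * Worked example (illustrative): a 64x64 cursor dragged to crtc_x = -16
 * yields x = 0 and x_hotspot = 16, so DC pins the cursor image to the left
 * edge with its hotspot shifted 16 pixels into it; once the cursor sits
 * entirely off-screen (x <= -max_cursor_width) position.enable stays false
 * and the caller turns the cursor off.
 */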
8912
8913 static void handle_cursor_update(struct drm_plane *plane,
8914                                  struct drm_plane_state *old_plane_state)
8915 {
8916         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8917         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8918         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8919         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8920         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8921         uint64_t address = afb ? afb->address : 0;
8922         struct dc_cursor_position position = {0};
8923         struct dc_cursor_attributes attributes;
8924         int ret;
8925
8926         if (!plane->state->fb && !old_plane_state->fb)
8927                 return;
8928
8929         DC_LOG_CURSOR("%s: crtc_id=%d with size %dx%d\n",
8930                       __func__,
8931                       amdgpu_crtc->crtc_id,
8932                       plane->state->crtc_w,
8933                       plane->state->crtc_h);
8934
8935         ret = get_cursor_position(plane, crtc, &position);
8936         if (ret)
8937                 return;
8938
8939         if (!position.enable) {
8940                 /* turn off cursor */
8941                 if (crtc_state && crtc_state->stream) {
8942                         mutex_lock(&adev->dm.dc_lock);
8943                         dc_stream_set_cursor_position(crtc_state->stream,
8944                                                       &position);
8945                         mutex_unlock(&adev->dm.dc_lock);
8946                 }
8947                 return;
8948         }
8949
8950         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8951         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8952
8953         memset(&attributes, 0, sizeof(attributes));
8954         attributes.address.high_part = upper_32_bits(address);
8955         attributes.address.low_part  = lower_32_bits(address);
8956         attributes.width             = plane->state->crtc_w;
8957         attributes.height            = plane->state->crtc_h;
8958         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8959         attributes.rotation_angle    = 0;
8960         attributes.attribute_flags.value = 0;
8961
8962         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8963
8964         if (crtc_state->stream) {
8965                 mutex_lock(&adev->dm.dc_lock);
8966                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8967                                                          &attributes))
8968                         DRM_ERROR("DC failed to set cursor attributes\n");
8969
8970                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8971                                                    &position))
8972                         DRM_ERROR("DC failed to set cursor position\n");
8973                 mutex_unlock(&adev->dm.dc_lock);
8974         }
8975 }
8976
8977 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8978 {
8979
8981         WARN_ON(acrtc->event);
8982
8983         acrtc->event = acrtc->base.state->event;
8984
8985         /* Set the flip status */
8986         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8987
8988         /* Mark this event as consumed */
8989         acrtc->base.state->event = NULL;
8990
8991         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8992                      acrtc->crtc_id);
8993 }
8994
8995 static void update_freesync_state_on_stream(
8996         struct amdgpu_display_manager *dm,
8997         struct dm_crtc_state *new_crtc_state,
8998         struct dc_stream_state *new_stream,
8999         struct dc_plane_state *surface,
9000         u32 flip_timestamp_in_us)
9001 {
9002         struct mod_vrr_params vrr_params;
9003         struct dc_info_packet vrr_infopacket = {0};
9004         struct amdgpu_device *adev = dm->adev;
9005         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9006         unsigned long flags;
9007         bool pack_sdp_v1_3 = false;
9008
9009         if (!new_stream)
9010                 return;
9011
9012         /*
9013          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9014          * For now it's sufficient to just guard against these conditions.
9015          */
9016
9017         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9018                 return;
9019
9020         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9021         vrr_params = acrtc->dm_irq_params.vrr_params;
9022
9023         if (surface) {
9024                 mod_freesync_handle_preflip(
9025                         dm->freesync_module,
9026                         surface,
9027                         new_stream,
9028                         flip_timestamp_in_us,
9029                         &vrr_params);
9030
9031                 if (adev->family < AMDGPU_FAMILY_AI &&
9032                     amdgpu_dm_vrr_active(new_crtc_state)) {
9033                         mod_freesync_handle_v_update(dm->freesync_module,
9034                                                      new_stream, &vrr_params);
9035
9036                         /* Need to call this before the frame ends. */
9037                         dc_stream_adjust_vmin_vmax(dm->dc,
9038                                                    new_crtc_state->stream,
9039                                                    &vrr_params.adjust);
9040                 }
9041         }
9042
9043         mod_freesync_build_vrr_infopacket(
9044                 dm->freesync_module,
9045                 new_stream,
9046                 &vrr_params,
9047                 PACKET_TYPE_VRR,
9048                 TRANSFER_FUNC_UNKNOWN,
9049                 &vrr_infopacket,
9050                 pack_sdp_v1_3);
9051
9052         new_crtc_state->freesync_timing_changed |=
9053                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9054                         &vrr_params.adjust,
9055                         sizeof(vrr_params.adjust)) != 0);
9056
9057         new_crtc_state->freesync_vrr_info_changed |=
9058                 (memcmp(&new_crtc_state->vrr_infopacket,
9059                         &vrr_infopacket,
9060                         sizeof(vrr_infopacket)) != 0);
9061
9062         acrtc->dm_irq_params.vrr_params = vrr_params;
9063         new_crtc_state->vrr_infopacket = vrr_infopacket;
9064
9065         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9066         new_stream->vrr_infopacket = vrr_infopacket;
9067
9068         if (new_crtc_state->freesync_vrr_info_changed)
9069                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
9070                               new_crtc_state->base.crtc->base.id,
9071                               (int)new_crtc_state->base.vrr_enabled,
9072                               (int)vrr_params.state);
9073
9074         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9075 }
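/*
 * The event_lock held across the body above matters because
 * acrtc->dm_irq_params is also consumed from the vblank/vupdate interrupt
 * handlers; the memcmp()-based *_changed flags let the commit path skip
 * redundant VRR timing and infopacket updates later on.
 */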
9076
9077 static void update_stream_irq_parameters(
9078         struct amdgpu_display_manager *dm,
9079         struct dm_crtc_state *new_crtc_state)
9080 {
9081         struct dc_stream_state *new_stream = new_crtc_state->stream;
9082         struct mod_vrr_params vrr_params;
9083         struct mod_freesync_config config = new_crtc_state->freesync_config;
9084         struct amdgpu_device *adev = dm->adev;
9085         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9086         unsigned long flags;
9087
9088         if (!new_stream)
9089                 return;
9090
9091         /*
9092          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9093          * For now it's sufficient to just guard against these conditions.
9094          */
9095         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9096                 return;
9097
9098         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9099         vrr_params = acrtc->dm_irq_params.vrr_params;
9100
9101         if (new_crtc_state->vrr_supported &&
9102             config.min_refresh_in_uhz &&
9103             config.max_refresh_in_uhz) {
9104                 /*
9105                  * if freesync compatible mode was set, config.state will be set
9106                  * in atomic check
9107                  */
9108                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9109                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9110                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9111                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9112                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9113                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9114                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9115                 } else {
9116                         config.state = new_crtc_state->base.vrr_enabled ?
9117                                                      VRR_STATE_ACTIVE_VARIABLE :
9118                                                      VRR_STATE_INACTIVE;
9119                 }
9120         } else {
9121                 config.state = VRR_STATE_UNSUPPORTED;
9122         }
9123
9124         mod_freesync_build_vrr_params(dm->freesync_module,
9125                                       new_stream,
9126                                       &config, &vrr_params);
9127
9128         new_crtc_state->freesync_timing_changed |=
9129                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9130                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9131
9132         new_crtc_state->freesync_config = config;
9133         /* Copy state for access from DM IRQ handler */
9134         acrtc->dm_irq_params.freesync_config = config;
9135         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9136         acrtc->dm_irq_params.vrr_params = vrr_params;
9137         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9138 }
9139
9140 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9141                                             struct dm_crtc_state *new_state)
9142 {
9143         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9144         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9145
9146         if (!old_vrr_active && new_vrr_active) {
9147                 /* Transition VRR inactive -> active:
9148                  * While VRR is active, we must not disable the vblank irq, as
9149                  * a re-enable after a disable would compute bogus vblank/pflip
9150                  * timestamps if it happened inside the display front porch.
9151                  *
9152                  * We also need vupdate irq for the actual core vblank handling
9153                  * at end of vblank.
9154                  */
9155                 dm_set_vupdate_irq(new_state->base.crtc, true);
9156                 drm_crtc_vblank_get(new_state->base.crtc);
9157                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9158                                  __func__, new_state->base.crtc->base.id);
9159         } else if (old_vrr_active && !new_vrr_active) {
9160                 /* Transition VRR active -> inactive:
9161                  * Allow vblank irq disable again for fixed refresh rate.
9162                  */
9163                 dm_set_vupdate_irq(new_state->base.crtc, false);
9164                 drm_crtc_vblank_put(new_state->base.crtc);
9165                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9166                                  __func__, new_state->base.crtc->base.id);
9167         }
9168 }
9169
9170 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9171 {
9172         struct drm_plane *plane;
9173         struct drm_plane_state *old_plane_state;
9174         int i;
9175
9176         /*
9177          * TODO: Make this per-stream so we don't issue redundant updates for
9178          * commits with multiple streams.
9179          */
9180         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9181                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9182                         handle_cursor_update(plane, old_plane_state);
9183 }
9184
9185 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9186                                     struct dc_state *dc_state,
9187                                     struct drm_device *dev,
9188                                     struct amdgpu_display_manager *dm,
9189                                     struct drm_crtc *pcrtc,
9190                                     bool wait_for_vblank)
9191 {
9192         uint32_t i;
9193         uint64_t timestamp_ns;
9194         struct drm_plane *plane;
9195         struct drm_plane_state *old_plane_state, *new_plane_state;
9196         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9197         struct drm_crtc_state *new_pcrtc_state =
9198                         drm_atomic_get_new_crtc_state(state, pcrtc);
9199         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9200         struct dm_crtc_state *dm_old_crtc_state =
9201                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9202         int planes_count = 0, vpos, hpos;
9203         long r;
9204         unsigned long flags;
9205         struct amdgpu_bo *abo;
9206         uint32_t target_vblank, last_flip_vblank;
9207         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9208         bool cursor_update = false;
9209         bool pflip_present = false;
9210         struct {
9211                 struct dc_surface_update surface_updates[MAX_SURFACES];
9212                 struct dc_plane_info plane_infos[MAX_SURFACES];
9213                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9214                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9215                 struct dc_stream_update stream_update;
9216         } *bundle;
9217
9218         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9219
9220         if (!bundle) {
9221                 dm_error("Failed to allocate update bundle\n");
9222                 goto cleanup;
9223         }
9224
9225         /*
9226          * Disable the cursor first if we're disabling all the planes.
9227          * It'll remain on the screen after the planes are re-enabled
9228          * if we don't.
9229          */
9230         if (acrtc_state->active_planes == 0)
9231                 amdgpu_dm_commit_cursors(state);
9232
9233         /* update planes when needed */
9234         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9235                 struct drm_crtc *crtc = new_plane_state->crtc;
9236                 struct drm_crtc_state *new_crtc_state;
9237                 struct drm_framebuffer *fb = new_plane_state->fb;
9238                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9239                 bool plane_needs_flip;
9240                 struct dc_plane_state *dc_plane;
9241                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9242
9243                 /* Cursor plane is handled after stream updates */
9244                 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9245                         if ((fb && crtc == pcrtc) ||
9246                             (old_plane_state->fb && old_plane_state->crtc == pcrtc))
9247                                 cursor_update = true;
9248
9249                         continue;
9250                 }
9251
9252                 if (!fb || !crtc || pcrtc != crtc)
9253                         continue;
9254
9255                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9256                 if (!new_crtc_state->active)
9257                         continue;
9258
9259                 dc_plane = dm_new_plane_state->dc_state;
9260
9261                 bundle->surface_updates[planes_count].surface = dc_plane;
9262                 if (new_pcrtc_state->color_mgmt_changed) {
9263                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9264                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9265                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9266                 }
9267
9268                 fill_dc_scaling_info(dm->adev, new_plane_state,
9269                                      &bundle->scaling_infos[planes_count]);
9270
9271                 bundle->surface_updates[planes_count].scaling_info =
9272                         &bundle->scaling_infos[planes_count];
9273
9274                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9275
9276                 pflip_present = pflip_present || plane_needs_flip;
9277
9278                 if (!plane_needs_flip) {
9279                         planes_count += 1;
9280                         continue;
9281                 }
9282
9283                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9284
9285                 /*
9286                  * Wait for all fences on this FB. Do limited wait to avoid
9287                  * deadlock during GPU reset when this fence will not signal
9288                  * but we hold reservation lock for the BO.
9289                  */
9290                 r = dma_resv_wait_timeout(abo->tbo.base.resv,
9291                                           DMA_RESV_USAGE_WRITE, false,
9292                                           msecs_to_jiffies(5000));
9293                 if (unlikely(r <= 0))
9294                         DRM_ERROR("Waiting for fences timed out!");
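                /*
                 * On timeout we still proceed with the flip: scanning out a
                 * possibly-stale buffer is preferable to deadlocking a GPU
                 * reset while holding the reservation lock.
                 */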
9295
9296                 fill_dc_plane_info_and_addr(
9297                         dm->adev, new_plane_state,
9298                         afb->tiling_flags,
9299                         &bundle->plane_infos[planes_count],
9300                         &bundle->flip_addrs[planes_count].address,
9301                         afb->tmz_surface, false);
9302
9303                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9304                                  new_plane_state->plane->index,
9305                                  bundle->plane_infos[planes_count].dcc.enable);
9306
9307                 bundle->surface_updates[planes_count].plane_info =
9308                         &bundle->plane_infos[planes_count];
9309
9310                 /*
9311                  * Only allow immediate flips for fast updates that don't
9312          * change FB pitch, DCC state, rotation or mirroring.
9313                  */
9314                 bundle->flip_addrs[planes_count].flip_immediate =
9315                         crtc->state->async_flip &&
9316                         acrtc_state->update_type == UPDATE_TYPE_FAST;
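                /*
                 * For illustration only, a hypothetical userspace sketch of
                 * what sets crtc->state->async_flip (via the legacy page-flip
                 * ioctl; everything besides the libdrm call and flags is an
                 * assumption):
                 *
                 *   drmModePageFlip(drm_fd, crtc_id, fb_id,
                 *                   DRM_MODE_PAGE_FLIP_EVENT |
                 *                   DRM_MODE_PAGE_FLIP_ASYNC, user_data);
                 *
                 * DC then honors the immediate flip only when the update is
                 * UPDATE_TYPE_FAST, per the condition above.
                 */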
9317
9318                 timestamp_ns = ktime_get_ns();
9319                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9320                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9321                 bundle->surface_updates[planes_count].surface = dc_plane;
9322
9323                 if (!bundle->surface_updates[planes_count].surface) {
9324                         DRM_ERROR("No surface for CRTC: id=%d\n",
9325                                         acrtc_attach->crtc_id);
9326                         continue;
9327                 }
9328
9329                 if (plane == pcrtc->primary)
9330                         update_freesync_state_on_stream(
9331                                 dm,
9332                                 acrtc_state,
9333                                 acrtc_state->stream,
9334                                 dc_plane,
9335                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9336
9337                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9338                                  __func__,
9339                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9340                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9341
9342                 planes_count += 1;
9343
9344         }
9345
9346         if (pflip_present) {
9347                 if (!vrr_active) {
9348                         /* Use old throttling in non-vrr fixed refresh rate mode
9349                          * to keep flip scheduling based on target vblank counts
9350                          * working in a backwards compatible way, e.g., for
9351                          * clients using the GLX_OML_sync_control extension or
9352                          * DRI3/Present extension with defined target_msc.
9353                          */
9354                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9355                 } else {
9357                         /* For variable refresh rate mode only:
9358                          * Get vblank of last completed flip to avoid > 1 vrr
9359                          * flips per video frame by use of throttling, but allow
9360                          * flip programming anywhere in the possibly large
9361                          * variable vrr vblank interval for fine-grained flip
9362                          * timing control and more opportunity to avoid stutter
9363                          * on late submission of flips.
9364                          */
9365                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9366                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9367                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9368                 }
9369
9370                 target_vblank = last_flip_vblank + wait_for_vblank;
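                /*
                 * Worked example for the throttle above, with assumed
                 * numbers: if the last flip completed in vblank N and
                 * wait_for_vblank is 1 (no async flips in this commit),
                 * target_vblank is N + 1 and at most one flip gets
                 * programmed per refresh cycle; with async flips,
                 * wait_for_vblank is 0 and the wait below exits at once.
                 */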
9371
9372                 /*
9373                  * Wait until we're out of the vertical blank period before the one
9374                  * targeted by the flip
9375                  */
9376                 while ((acrtc_attach->enabled &&
9377                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9378                                                             0, &vpos, &hpos, NULL,
9379                                                             NULL, &pcrtc->hwmode)
9380                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9381                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9382                         (int)(target_vblank -
9383                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9384                         usleep_range(1000, 1100);
9385                 }
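                /*
                 * The loop above naps in ~1 ms slices while the CRTC still
                 * reports a valid, in-vblank scanout position and the vblank
                 * counter has not yet reached target_vblank; the signed
                 * (int) cast keeps the comparison correct across counter
                 * wraparound.
                 */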
9386
9387                 /**
9388                  * Prepare the flip event for the pageflip interrupt to handle.
9389                  *
9390                  * This only works in the case where we've already turned on the
9391                  * appropriate hardware blocks (eg. HUBP) so in the transition case
9392                  * from 0 -> n planes we have to skip a hardware generated event
9393                  * and rely on sending it from software.
9394                  */
9395                 if (acrtc_attach->base.state->event &&
9396                     acrtc_state->active_planes > 0 &&
9397                     !acrtc_state->force_dpms_off) {
9398                         drm_crtc_vblank_get(pcrtc);
9399
9400                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9401
9402                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9403                         prepare_flip_isr(acrtc_attach);
9404
9405                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9406                 }
9407
9408                 if (acrtc_state->stream) {
9409                         if (acrtc_state->freesync_vrr_info_changed)
9410                                 bundle->stream_update.vrr_infopacket =
9411                                         &acrtc_state->stream->vrr_infopacket;
9412                 }
9413         } else if (cursor_update && acrtc_state->active_planes > 0 &&
9414                    !acrtc_state->force_dpms_off &&
9415                    acrtc_attach->base.state->event) {
9416                 drm_crtc_vblank_get(pcrtc);
9417
9418                 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9419
9420                 acrtc_attach->event = acrtc_attach->base.state->event;
9421                 acrtc_attach->base.state->event = NULL;
9422
9423                 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9424         }
9425
9426         /* Update the planes if changed or disable if we don't have any. */
9427         if ((planes_count || acrtc_state->active_planes == 0) &&
9428                 acrtc_state->stream) {
9429                 /*
9430                  * If PSR or idle optimizations are enabled then flush out
9431                  * any pending work before hardware programming.
9432                  */
9433                 if (dm->vblank_control_workqueue)
9434                         flush_workqueue(dm->vblank_control_workqueue);
9435
9436                 bundle->stream_update.stream = acrtc_state->stream;
9437                 if (new_pcrtc_state->mode_changed) {
9438                         bundle->stream_update.src = acrtc_state->stream->src;
9439                         bundle->stream_update.dst = acrtc_state->stream->dst;
9440                 }
9441
9442                 if (new_pcrtc_state->color_mgmt_changed) {
9443                         /*
9444                          * TODO: This isn't fully correct since we've actually
9445                          * already modified the stream in place.
9446                          */
9447                         bundle->stream_update.gamut_remap =
9448                                 &acrtc_state->stream->gamut_remap_matrix;
9449                         bundle->stream_update.output_csc_transform =
9450                                 &acrtc_state->stream->csc_color_matrix;
9451                         bundle->stream_update.out_transfer_func =
9452                                 acrtc_state->stream->out_transfer_func;
9453                 }
9454
9455                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9456                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9457                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9458
9459                 /*
9460                  * If FreeSync state on the stream has changed then we need to
9461                  * re-adjust the min/max bounds now that DC doesn't handle this
9462                  * as part of commit.
9463                  */
9464                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9465                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9466                         dc_stream_adjust_vmin_vmax(
9467                                 dm->dc, acrtc_state->stream,
9468                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9469                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9470                 }
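                /*
                 * A worked example under assumed panel numbers: for a
                 * 48-120 Hz VRR panel whose 120 Hz mode has vtotal 1125,
                 * the adjust range would be roughly v_total_min = 1125 and
                 * v_total_max = 2812 (1125 * 120 / 48), letting the OTG
                 * stretch the vertical blank instead of forcing a modeset.
                 */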
9471                 mutex_lock(&dm->dc_lock);
9472                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9473                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9474                         amdgpu_dm_psr_disable(acrtc_state->stream);
9475
9476                 dc_commit_updates_for_stream(dm->dc,
9477                                                      bundle->surface_updates,
9478                                                      planes_count,
9479                                                      acrtc_state->stream,
9480                                                      &bundle->stream_update,
9481                                                      dc_state);
9482
9483                 /**
9484                  * Enable or disable the interrupts on the backend.
9485                  *
9486                  * Most pipes are put into power gating when unused.
9487                  *
9488                  * When power gating is enabled on a pipe we lose the
9489                  * interrupt enablement state when power gating is disabled.
9490                  *
9491                  * So we need to update the IRQ control state in hardware
9492                  * whenever the pipe turns on (since it could be previously
9493                  * power gated) or off (since some pipes can't be power gated
9494                  * on some ASICs).
9495                  */
9496                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9497                         dm_update_pflip_irq_state(drm_to_adev(dev),
9498                                                   acrtc_attach);
9499
9500                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9501                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9502                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9503                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9504
9505                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9506                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9507                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9508                         struct amdgpu_dm_connector *aconn =
9509                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9510
9511                         if (aconn->psr_skip_count > 0)
9512                                 aconn->psr_skip_count--;
9513
9514                         /* Allow PSR when skip count is 0. */
9515                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9516                 } else {
9517                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9518                 }
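                /*
                 * Net effect of the skip counter: assuming it was primed to
                 * N elsewhere on the last non-fast update, PSR entry is only
                 * allowed again after N consecutive UPDATE_TYPE_FAST commits
                 * on this stream.
                 */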
9519
9520                 mutex_unlock(&dm->dc_lock);
9521         }
9522
9523         /*
9524          * Update cursor state *after* programming all the planes.
9525          * This avoids redundant programming in the case where we're going
9526          * to be disabling a single plane - those pipes are being disabled.
9527          */
9528         if (acrtc_state->active_planes)
9529                 amdgpu_dm_commit_cursors(state);
9530
9531 cleanup:
9532         kfree(bundle);
9533 }
9534
9535 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9536                                    struct drm_atomic_state *state)
9537 {
9538         struct amdgpu_device *adev = drm_to_adev(dev);
9539         struct amdgpu_dm_connector *aconnector;
9540         struct drm_connector *connector;
9541         struct drm_connector_state *old_con_state, *new_con_state;
9542         struct drm_crtc_state *new_crtc_state;
9543         struct dm_crtc_state *new_dm_crtc_state;
9544         const struct dc_stream_status *status;
9545         int i, inst;
9546
9547         /* Notify audio device removals. */
9548         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9549                 if (old_con_state->crtc != new_con_state->crtc) {
9550                         /* CRTC changes require notification. */
9551                         goto notify;
9552                 }
9553
9554                 if (!new_con_state->crtc)
9555                         continue;
9556
9557                 new_crtc_state = drm_atomic_get_new_crtc_state(
9558                         state, new_con_state->crtc);
9559
9560                 if (!new_crtc_state)
9561                         continue;
9562
9563                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9564                         continue;
9565
9566         notify:
9567                 aconnector = to_amdgpu_dm_connector(connector);
9568
9569                 mutex_lock(&adev->dm.audio_lock);
9570                 inst = aconnector->audio_inst;
9571                 aconnector->audio_inst = -1;
9572                 mutex_unlock(&adev->dm.audio_lock);
9573
9574                 amdgpu_dm_audio_eld_notify(adev, inst);
9575         }
9576
9577         /* Notify audio device additions. */
9578         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9579                 if (!new_con_state->crtc)
9580                         continue;
9581
9582                 new_crtc_state = drm_atomic_get_new_crtc_state(
9583                         state, new_con_state->crtc);
9584
9585                 if (!new_crtc_state)
9586                         continue;
9587
9588                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9589                         continue;
9590
9591                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9592                 if (!new_dm_crtc_state->stream)
9593                         continue;
9594
9595                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9596                 if (!status)
9597                         continue;
9598
9599                 aconnector = to_amdgpu_dm_connector(connector);
9600
9601                 mutex_lock(&adev->dm.audio_lock);
9602                 inst = status->audio_inst;
9603                 aconnector->audio_inst = inst;
9604                 mutex_unlock(&adev->dm.audio_lock);
9605
9606                 amdgpu_dm_audio_eld_notify(adev, inst);
9607         }
9608 }
9609
9610 /*
9611  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9612  * @crtc_state: the DRM CRTC state
9613  * @stream_state: the DC stream state.
9614  *
9615  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9616  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9617  */
9618 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9619                                                 struct dc_stream_state *stream_state)
9620 {
9621         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9622 }
9623
9624 /**
9625  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9626  * @state: The atomic state to commit
9627  *
9628  * This will tell DC to commit the constructed DC state from atomic_check,
9629  * programming the hardware. Any failure here implies a hardware failure, since
9630  * atomic check should have filtered out anything non-kosher.
9631  */
9632 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9633 {
9634         struct drm_device *dev = state->dev;
9635         struct amdgpu_device *adev = drm_to_adev(dev);
9636         struct amdgpu_display_manager *dm = &adev->dm;
9637         struct dm_atomic_state *dm_state;
9638         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9639         uint32_t i, j;
9640         struct drm_crtc *crtc;
9641         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9642         unsigned long flags;
9643         bool wait_for_vblank = true;
9644         struct drm_connector *connector;
9645         struct drm_connector_state *old_con_state, *new_con_state;
9646         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9647         int crtc_disable_count = 0;
9648         bool mode_set_reset_required = false;
9649
9650         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9651
9652         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9653
9654         dm_state = dm_atomic_get_new_state(state);
9655         if (dm_state && dm_state->context) {
9656                 dc_state = dm_state->context;
9657         } else {
9658                 /* No state changes, retain current state. */
9659                 dc_state_temp = dc_create_state(dm->dc);
9660                 ASSERT(dc_state_temp);
9661                 dc_state = dc_state_temp;
9662                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9663         }
9664
9665         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9666                                        new_crtc_state, i) {
9667                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9668
9669                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9670
9671                 if (old_crtc_state->active &&
9672                     (!new_crtc_state->active ||
9673                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9674                         manage_dm_interrupts(adev, acrtc, false);
9675                         dc_stream_release(dm_old_crtc_state->stream);
9676                 }
9677         }
9678
9679         drm_atomic_helper_calc_timestamping_constants(state);
9680
9681         /* update changed items */
9682         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9683                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9684
9685                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9686                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9687
9688                 drm_dbg_state(state->dev,
9689                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9690                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9691                         "connectors_changed:%d\n",
9692                         acrtc->crtc_id,
9693                         new_crtc_state->enable,
9694                         new_crtc_state->active,
9695                         new_crtc_state->planes_changed,
9696                         new_crtc_state->mode_changed,
9697                         new_crtc_state->active_changed,
9698                         new_crtc_state->connectors_changed);
9699
9700                 /* Disable cursor if disabling crtc */
9701                 if (old_crtc_state->active && !new_crtc_state->active) {
9702                         struct dc_cursor_position position;
9703
9704                         memset(&position, 0, sizeof(position));
9705                         mutex_lock(&dm->dc_lock);
9706                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9707                         mutex_unlock(&dm->dc_lock);
9708                 }
9709
9710                 /* Copy all transient state flags into dc state */
9711                 if (dm_new_crtc_state->stream) {
9712                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9713                                                             dm_new_crtc_state->stream);
9714                 }
9715
9716                 /* Handle the headless hotplug case, updating new_state and
9717                  * aconnector as needed.
9718                  */
9719
9720                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9721
9722                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9723
9724                         if (!dm_new_crtc_state->stream) {
9725                                 /*
9726                                  * This can happen due to issues with the
9727                                  * delivery of userspace notifications:
9728                                  * userspace tries to set a mode on a display
9729                                  * that is in fact already disconnected, so
9730                                  * dc_sink is NULL on the aconnector.
9731                                  * We expect a mode reset to come soon.
9732                                  *
9733                                  * This can also happen when an unplug is
9734                                  * done during the resume sequence.
9735                                  *
9736                                  * In this case, we want to pretend we still
9737                                  * have a sink to keep the pipe running so that
9738                                  * the hw state stays consistent with the sw state.
9739                                  */
9740                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9741                                                 __func__, acrtc->base.base.id);
9742                                 continue;
9743                         }
9744
9745                         if (dm_old_crtc_state->stream)
9746                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9747
9748                         pm_runtime_get_noresume(dev->dev);
9749
9750                         acrtc->enabled = true;
9751                         acrtc->hw_mode = new_crtc_state->mode;
9752                         crtc->hwmode = new_crtc_state->mode;
9753                         mode_set_reset_required = true;
9754                 } else if (modereset_required(new_crtc_state)) {
9755                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9756                         /* i.e. reset mode */
9757                         if (dm_old_crtc_state->stream)
9758                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9759
9760                         mode_set_reset_required = true;
9761                 }
9762         } /* for_each_crtc_in_state() */
9763
9764         if (dc_state) {
9765                 /* if there was a mode set or reset, disable eDP PSR */
9766                 if (mode_set_reset_required) {
9767                         if (dm->vblank_control_workqueue)
9768                                 flush_workqueue(dm->vblank_control_workqueue);
9769
9770                         amdgpu_dm_psr_disable_all(dm);
9771                 }
9772
9773                 dm_enable_per_frame_crtc_master_sync(dc_state);
9774                 mutex_lock(&dm->dc_lock);
9775                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9776
9777                 /* Allow idle optimization when vblank count is 0 for display off */
9778                 if (dm->active_vblank_irq_count == 0)
9779                         dc_allow_idle_optimizations(dm->dc, true);
9780                 mutex_unlock(&dm->dc_lock);
9781         }
9782
9783         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9784                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9785
9786                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9787
9788                 if (dm_new_crtc_state->stream != NULL) {
9789                         const struct dc_stream_status *status =
9790                                         dc_stream_get_status(dm_new_crtc_state->stream);
9791
9792                         if (!status)
9793                                 status = dc_stream_get_status_from_state(dc_state,
9794                                                                          dm_new_crtc_state->stream);
9795                         if (!status)
9796                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9797                         else
9798                                 acrtc->otg_inst = status->primary_otg_inst;
9799                 }
9800         }
9801 #ifdef CONFIG_DRM_AMD_DC_HDCP
9802         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9803                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9804                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9805                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9806
9807                 new_crtc_state = NULL;
9808
9809                 if (acrtc)
9810                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9811
9812                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9813
9814                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9815                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9816                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9817                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9818                         dm_new_con_state->update_hdcp = true;
9819                         continue;
9820                 }
9821
9822                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9823                         hdcp_update_display(
9824                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9825                                 new_con_state->hdcp_content_type,
9826                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9827         }
9828 #endif
9829
9830         /* Handle connector state changes */
9831         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9832                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9833                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9834                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9835                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9836                 struct dc_stream_update stream_update;
9837                 struct dc_info_packet hdr_packet;
9838                 struct dc_stream_status *status = NULL;
9839                 bool abm_changed, hdr_changed, scaling_changed;
9840
9841                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9842                 memset(&stream_update, 0, sizeof(stream_update));
9843
9844                 if (acrtc) {
9845                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9846                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9847                 }
9848
9849                 /* Skip any modesets/resets */
9850                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9851                         continue;
9852
9853                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9854                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9855
9856                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9857                                                              dm_old_con_state);
9858
9859                 abm_changed = dm_new_crtc_state->abm_level !=
9860                               dm_old_crtc_state->abm_level;
9861
9862                 hdr_changed =
9863                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9864
9865                 if (!scaling_changed && !abm_changed && !hdr_changed)
9866                         continue;
9867
9868                 stream_update.stream = dm_new_crtc_state->stream;
9869                 if (scaling_changed) {
9870                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9871                                         dm_new_con_state, dm_new_crtc_state->stream);
9872
9873                         stream_update.src = dm_new_crtc_state->stream->src;
9874                         stream_update.dst = dm_new_crtc_state->stream->dst;
9875                 }
9876
9877                 if (abm_changed) {
9878                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9879
9880                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9881                 }
9882
9883                 if (hdr_changed) {
9884                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9885                         stream_update.hdr_static_metadata = &hdr_packet;
9886                 }
9887
9888                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9889
9890                 if (WARN_ON(!status))
9891                         continue;
9892
9893                 WARN_ON(!status->plane_count);
9894
9895                 /*
9896                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9897                  * Here we create an empty update on each plane.
9898                  * To fix this, DC should permit updating only stream properties.
9899                  */
9900                 for (j = 0; j < status->plane_count; j++)
9901                         dummy_updates[j].surface = status->plane_states[0];
9902
9904                 mutex_lock(&dm->dc_lock);
9905                 dc_commit_updates_for_stream(dm->dc,
9906                                                      dummy_updates,
9907                                                      status->plane_count,
9908                                                      dm_new_crtc_state->stream,
9909                                                      &stream_update,
9910                                                      dc_state);
9911                 mutex_unlock(&dm->dc_lock);
9912         }
9913
9914         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9915         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9916                                       new_crtc_state, i) {
9917                 if (old_crtc_state->active && !new_crtc_state->active)
9918                         crtc_disable_count++;
9919
9920                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9921                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9922
9923                 /* For freesync config update on crtc state and params for irq */
9924                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9925
9926                 /* Handle vrr on->off / off->on transitions */
9927                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9928                                                 dm_new_crtc_state);
9929         }
9930
9931         /**
9932          * Enable interrupts for CRTCs that are newly enabled or went through
9933          * a modeset. This is intentionally deferred until after the front end
9934          * state has been modified, so that the OTG is already on and the IRQ
9935          * handlers never observe stale or invalid state.
9936          */
9937         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9938                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9939 #ifdef CONFIG_DEBUG_FS
9940                 bool configure_crc = false;
9941                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9942 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9943                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9944 #endif
9945                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9946                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9947                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9948 #endif
9949                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9950
9951                 if (new_crtc_state->active &&
9952                     (!old_crtc_state->active ||
9953                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9954                         dc_stream_retain(dm_new_crtc_state->stream);
9955                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9956                         manage_dm_interrupts(adev, acrtc, true);
9957
9958 #ifdef CONFIG_DEBUG_FS
9959                         /**
9960                          * Frontend may have changed so reapply the CRC capture
9961                          * settings for the stream.
9962                          */
9963                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9964
9965                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9966                                 configure_crc = true;
9967 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9968                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9969                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9970                                         acrtc->dm_irq_params.crc_window.update_win = true;
9971                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9972                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9973                                         crc_rd_wrk->crtc = crtc;
9974                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9975                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9976                                 }
9977 #endif
9978                         }
9979
9980                         if (configure_crc)
9981                                 if (amdgpu_dm_crtc_configure_crc_source(
9982                                         crtc, dm_new_crtc_state, cur_crc_src))
9983                                         DRM_DEBUG_DRIVER("Failed to configure crc source");
9984 #endif
9985                 }
9986         }
9987
9988         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9989                 if (new_crtc_state->async_flip)
9990                         wait_for_vblank = false;
9991
9992         /* update planes when needed per crtc */
9993         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9994                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9995
9996                 if (dm_new_crtc_state->stream)
9997                         amdgpu_dm_commit_planes(state, dc_state, dev,
9998                                                 dm, crtc, wait_for_vblank);
9999         }
10000
10001         /* Update audio instances for each connector. */
10002         amdgpu_dm_commit_audio(dev, state);
10003
10004 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
10005         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
10006         /* restore the backlight level */
10007         for (i = 0; i < dm->num_of_edps; i++) {
10008                 if (dm->backlight_dev[i] &&
10009                     (dm->actual_brightness[i] != dm->brightness[i]))
10010                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10011         }
10012 #endif
10013         /*
10014          * Send a vblank event for all events not handled in flip, and
10015          * mark the consumed events for drm_atomic_helper_commit_hw_done.
10016          */
10017         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10018         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10019
10020                 if (new_crtc_state->event)
10021                         drm_send_event_locked(dev, &new_crtc_state->event->base);
10022
10023                 new_crtc_state->event = NULL;
10024         }
10025         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10026
10027         /* Signal HW programming completion */
10028         drm_atomic_helper_commit_hw_done(state);
10029
10030         if (wait_for_vblank)
10031                 drm_atomic_helper_wait_for_flip_done(dev, state);
10032
10033         drm_atomic_helper_cleanup_planes(dev, state);
10034
10035         /* Return the stolen VGA memory back to VRAM. */
10036         if (!adev->mman.keep_stolen_vga_memory)
10037                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10038         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10039
10040         /*
10041          * Finally, drop a runtime PM reference for each newly disabled CRTC,
10042          * so we can put the GPU into runtime suspend if we're not driving any
10043          * displays anymore
10044          */
10045         for (i = 0; i < crtc_disable_count; i++)
10046                 pm_runtime_put_autosuspend(dev->dev);
10047         pm_runtime_mark_last_busy(dev->dev);
10048
10049         if (dc_state_temp)
10050                 dc_release_state(dc_state_temp);
10051 }
10052
10053
10054 static int dm_force_atomic_commit(struct drm_connector *connector)
10055 {
10056         int ret = 0;
10057         struct drm_device *ddev = connector->dev;
10058         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10059         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10060         struct drm_plane *plane = disconnected_acrtc->base.primary;
10061         struct drm_connector_state *conn_state;
10062         struct drm_crtc_state *crtc_state;
10063         struct drm_plane_state *plane_state;
10064
10065         if (!state)
10066                 return -ENOMEM;
10067
10068         state->acquire_ctx = ddev->mode_config.acquire_ctx;
10069
10070         /* Construct an atomic state to restore previous display setting */
10071
10072         /*
10073          * Attach connectors to drm_atomic_state
10074          */
10075         conn_state = drm_atomic_get_connector_state(state, connector);
10076
10077         ret = PTR_ERR_OR_ZERO(conn_state);
10078         if (ret)
10079                 goto out;
10080
10081         /* Attach crtc to drm_atomic_state */
10082         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10083
10084         ret = PTR_ERR_OR_ZERO(crtc_state);
10085         if (ret)
10086                 goto out;
10087
10088         /* force a restore */
10089         crtc_state->mode_changed = true;
10090
10091         /* Attach plane to drm_atomic_state */
10092         plane_state = drm_atomic_get_plane_state(state, plane);
10093
10094         ret = PTR_ERR_OR_ZERO(plane_state);
10095         if (ret)
10096                 goto out;
10097
10098         /* Call commit internally with the state we just constructed */
10099         ret = drm_atomic_commit(state);
10100
10101 out:
10102         drm_atomic_state_put(state);
10103         if (ret)
10104                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10105
10106         return ret;
10107 }
10108
10109 /*
10110  * This function handles all cases when a set mode does not come upon hotplug.
10111  * This includes when a display is unplugged and then plugged back into the
10112  * same port, and when running without usermode desktop manager support.
10113  */
10114 void dm_restore_drm_connector_state(struct drm_device *dev,
10115                                     struct drm_connector *connector)
10116 {
10117         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10118         struct amdgpu_crtc *disconnected_acrtc;
10119         struct dm_crtc_state *acrtc_state;
10120
10121         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10122                 return;
10123
10124         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10125         if (!disconnected_acrtc)
10126                 return;
10127
10128         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10129         if (!acrtc_state->stream)
10130                 return;
10131
10132         /*
10133          * If the previous sink is not released and is different from the
10134          * current one, we deduce that we cannot rely on a usermode call to
10135          * turn on the display, so we do it here.
10136          */
10137         if (acrtc_state->stream->sink != aconnector->dc_sink)
10138                 dm_force_atomic_commit(&aconnector->base);
10139 }
10140
10141 /*
10142  * Grabs all modesetting locks to serialize against any blocking commits,
10143  * and waits for completion of all non-blocking commits.
10144  */
10145 static int do_aquire_global_lock(struct drm_device *dev,
10146                                  struct drm_atomic_state *state)
10147 {
10148         struct drm_crtc *crtc;
10149         struct drm_crtc_commit *commit;
10150         long ret;
10151
10152         /*
10153          * Adding all modeset locks to acquire_ctx ensures that when
10154          * the framework releases it, the extra locks we take here
10155          * will get released too.
10156          */
10157         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10158         if (ret)
10159                 return ret;
10160
10161         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10162                 spin_lock(&crtc->commit_lock);
10163                 commit = list_first_entry_or_null(&crtc->commit_list,
10164                                 struct drm_crtc_commit, commit_entry);
10165                 if (commit)
10166                         drm_crtc_commit_get(commit);
10167                 spin_unlock(&crtc->commit_lock);
10168
10169                 if (!commit)
10170                         continue;
10171
10172                 /*
10173                  * Make sure all pending HW programming has completed and
10174                  * all page flips are done.
10175                  */
10176                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10177
10178                 if (ret > 0)
10179                         ret = wait_for_completion_interruptible_timeout(
10180                                         &commit->flip_done, 10*HZ);
10181
10182                 if (ret == 0)
10183                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10184                                   crtc->base.id, crtc->name);
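                /*
                 * Note: a timeout (ret == 0) is only logged; the return
                 * below maps it to success, so the new commit still
                 * proceeds instead of failing the whole operation.
                 */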
10185
10186                 drm_crtc_commit_put(commit);
10187         }
10188
10189         return ret < 0 ? ret : 0;
10190 }
10191
10192 static void get_freesync_config_for_crtc(
10193         struct dm_crtc_state *new_crtc_state,
10194         struct dm_connector_state *new_con_state)
10195 {
10196         struct mod_freesync_config config = {0};
10197         struct amdgpu_dm_connector *aconnector =
10198                         to_amdgpu_dm_connector(new_con_state->base.connector);
10199         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10200         int vrefresh = drm_mode_vrefresh(mode);
10201         bool fs_vid_mode = false;
10202
10203         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10204                                         vrefresh >= aconnector->min_vfreq &&
10205                                         vrefresh <= aconnector->max_vfreq;
10206
10207         if (new_crtc_state->vrr_supported) {
10208                 new_crtc_state->stream->ignore_msa_timing_param = true;
10209                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10210
10211                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10212                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10213                 config.vsif_supported = true;
10214                 config.btr = true;
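                /*
                 * Example under assumed panel limits: a 48-144 Hz display
                 * yields min_refresh_in_uhz = 48000000 and
                 * max_refresh_in_uhz = 144000000, i.e. mod_freesync expects
                 * refresh rates in micro-hertz (hence the * 1000000 above).
                 */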
10215
10216                 if (fs_vid_mode) {
10217                         config.state = VRR_STATE_ACTIVE_FIXED;
10218                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10219                         goto out;
10220                 } else if (new_crtc_state->base.vrr_enabled) {
10221                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10222                 } else {
10223                         config.state = VRR_STATE_INACTIVE;
10224                 }
10225         }
10226 out:
10227         new_crtc_state->freesync_config = config;
10228 }
10229
10230 static void reset_freesync_config_for_crtc(
10231         struct dm_crtc_state *new_crtc_state)
10232 {
10233         new_crtc_state->vrr_supported = false;
10234
10235         memset(&new_crtc_state->vrr_infopacket, 0,
10236                sizeof(new_crtc_state->vrr_infopacket));
10237 }
10238
10239 static bool
10240 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10241                                  struct drm_crtc_state *new_crtc_state)
10242 {
10243         const struct drm_display_mode *old_mode, *new_mode;
10244
10245         if (!old_crtc_state || !new_crtc_state)
10246                 return false;
10247
10248         old_mode = &old_crtc_state->mode;
10249         new_mode = &new_crtc_state->mode;
10250
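        /*
         * The mix of == and != below is intentional: a freesync video mode
         * keeps the pixel clock, active area, horizontal timing and the
         * vertical sync width identical, and is expected to differ only in
         * the vertical front porch, so vtotal/vsync_start/vsync_end may
         * change while (vsync_end - vsync_start) stays fixed.
         */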
10251         if (old_mode->clock       == new_mode->clock &&
10252             old_mode->hdisplay    == new_mode->hdisplay &&
10253             old_mode->vdisplay    == new_mode->vdisplay &&
10254             old_mode->htotal      == new_mode->htotal &&
10255             old_mode->vtotal      != new_mode->vtotal &&
10256             old_mode->hsync_start == new_mode->hsync_start &&
10257             old_mode->vsync_start != new_mode->vsync_start &&
10258             old_mode->hsync_end   == new_mode->hsync_end &&
10259             old_mode->vsync_end   != new_mode->vsync_end &&
10260             old_mode->hskew       == new_mode->hskew &&
10261             old_mode->vscan       == new_mode->vscan &&
10262             (old_mode->vsync_end - old_mode->vsync_start) ==
10263             (new_mode->vsync_end - new_mode->vsync_start))
10264                 return true;
10265
10266         return false;
10267 }
10268
10269 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10270         uint64_t num, den, res;
10271         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10272
10273         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10274
10275         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10276         den = (unsigned long long)new_crtc_state->mode.htotal *
10277               (unsigned long long)new_crtc_state->mode.vtotal;
10278
10279         res = div_u64(num, den);
10280         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10281 }
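/*
 * Worked example for the computation above, with assumed mode numbers:
 * a 1920x1080 mode with a 148500 kHz pixel clock, htotal 2200 and
 * vtotal 1125 gives
 *
 *   148500 * 1000 * 1000000 / (2200 * 1125) = 60000000 uHz,
 *
 * i.e. exactly 60 Hz expressed in the micro-hertz units that
 * freesync_config.fixed_refresh_in_uhz stores.
 */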
10282
10283 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10284                          struct drm_atomic_state *state,
10285                          struct drm_crtc *crtc,
10286                          struct drm_crtc_state *old_crtc_state,
10287                          struct drm_crtc_state *new_crtc_state,
10288                          bool enable,
10289                          bool *lock_and_validation_needed)
10290 {
10291         struct dm_atomic_state *dm_state = NULL;
10292         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10293         struct dc_stream_state *new_stream;
10294         int ret = 0;
10295
10296         /*
10297          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10298          * update changed items
10299          */
10300         struct amdgpu_crtc *acrtc = NULL;
10301         struct amdgpu_dm_connector *aconnector = NULL;
10302         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10303         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10304
10305         new_stream = NULL;
10306
10307         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10308         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10309         acrtc = to_amdgpu_crtc(crtc);
10310         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10311
10312         /* TODO This hack should go away */
10313         if (aconnector && enable) {
10314                 /* Make sure fake sink is created in plug-in scenario */
10315                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10316                                                             &aconnector->base);
10317                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10318                                                             &aconnector->base);
10319
10320                 if (IS_ERR(drm_new_conn_state)) {
10321                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10322                         goto fail;
10323                 }
10324
10325                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10326                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10327
10328                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10329                         goto skip_modeset;
10330
10331                 new_stream = create_validate_stream_for_sink(aconnector,
10332                                                              &new_crtc_state->mode,
10333                                                              dm_new_conn_state,
10334                                                              dm_old_crtc_state->stream);
10335
10336                 /*
10337                  * We can have no stream on ACTION_SET if a display
10338                  * was disconnected during S3. In this case it is not an
10339                  * error; the OS will be updated after detection and
10340                  * will do the right thing on the next atomic commit.
10341                  */
10342
10343                 if (!new_stream) {
10344                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10345                                         __func__, acrtc->base.base.id);
10346                         ret = -ENOMEM;
10347                         goto fail;
10348                 }
10349
10350                 /*
10351                  * TODO: Check VSDB bits to decide whether this should
10352                  * be enabled or not.
10353                  */
10354                 new_stream->triggered_crtc_reset.enabled =
10355                         dm->force_timing_sync;
10356
10357                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10358
10359                 ret = fill_hdr_info_packet(drm_new_conn_state,
10360                                            &new_stream->hdr_static_metadata);
10361                 if (ret)
10362                         goto fail;
10363
10364                 /*
10365                  * If we already removed the old stream from the context
10366                  * (and set the new stream to NULL) then we can't reuse
10367                  * the old stream even if the stream and scaling are unchanged.
10368                  * We'll hit the BUG_ON and get a black screen.
10369                  *
10370                  * TODO: Refactor this function to allow this check to work
10371                  * in all conditions.
10372                  */
10373                 if (dm_new_crtc_state->stream &&
10374                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10375                         goto skip_modeset;
10376
10377                 if (dm_new_crtc_state->stream &&
10378                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10379                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10380                         new_crtc_state->mode_changed = false;
10381                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10382                                          new_crtc_state->mode_changed);
10383                 }
10384         }
10385
10386         /* mode_changed flag may get updated above, need to check again */
10387         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10388                 goto skip_modeset;
10389
10390         drm_dbg_state(state->dev,
10391                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10392                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10393                 "connectors_changed:%d\n",
10394                 acrtc->crtc_id,
10395                 new_crtc_state->enable,
10396                 new_crtc_state->active,
10397                 new_crtc_state->planes_changed,
10398                 new_crtc_state->mode_changed,
10399                 new_crtc_state->active_changed,
10400                 new_crtc_state->connectors_changed);
10401
10402         /* Remove stream for any changed/disabled CRTC */
10403         if (!enable) {
10404
10405                 if (!dm_old_crtc_state->stream)
10406                         goto skip_modeset;
10407
10408                 if (dm_new_crtc_state->stream &&
10409                     is_timing_unchanged_for_freesync(new_crtc_state,
10410                                                      old_crtc_state)) {
10411                         new_crtc_state->mode_changed = false;
10412                         DRM_DEBUG_DRIVER(
10413                                 "Mode change not required for front porch change, "
10414                                 "setting mode_changed to %d",
10415                                 new_crtc_state->mode_changed);
10416
10417                         set_freesync_fixed_config(dm_new_crtc_state);
10418
10419                         goto skip_modeset;
10420                 } else if (aconnector &&
10421                            is_freesync_video_mode(&new_crtc_state->mode,
10422                                                   aconnector)) {
10423                         struct drm_display_mode *high_mode;
10424
10425                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10426                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10427                                 set_freesync_fixed_config(dm_new_crtc_state);
10428                         }
10429                 }
10430
10431                 ret = dm_atomic_get_state(state, &dm_state);
10432                 if (ret)
10433                         goto fail;
10434
10435                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10436                                 crtc->base.id);
10437
10438                 /* i.e. reset mode */
10439                 if (dc_remove_stream_from_ctx(
10440                                 dm->dc,
10441                                 dm_state->context,
10442                                 dm_old_crtc_state->stream) != DC_OK) {
10443                         ret = -EINVAL;
10444                         goto fail;
10445                 }
10446
10447                 dc_stream_release(dm_old_crtc_state->stream);
10448                 dm_new_crtc_state->stream = NULL;
10449
10450                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10451
10452                 *lock_and_validation_needed = true;
10453
10454         } else { /* Add stream for any updated/enabled CRTC */
10455                 /*
10456                  * Quick fix to prevent a NULL pointer dereference on new_stream
10457                  * when newly added MST connectors are missing from the existing
10458                  * crtc_state in daisy-chained mode. TODO: find the root cause.
10459                  */
10460                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10461                         goto skip_modeset;
10462
10463                 if (modereset_required(new_crtc_state))
10464                         goto skip_modeset;
10465
10466                 if (modeset_required(new_crtc_state, new_stream,
10467                                      dm_old_crtc_state->stream)) {
10468
10469                         WARN_ON(dm_new_crtc_state->stream);
10470
10471                         ret = dm_atomic_get_state(state, &dm_state);
10472                         if (ret)
10473                                 goto fail;
10474
10475                         dm_new_crtc_state->stream = new_stream;
10476
10477                         dc_stream_retain(new_stream);
10478
10479                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10480                                          crtc->base.id);
10481
10482                         if (dc_add_stream_to_ctx(
10483                                         dm->dc,
10484                                         dm_state->context,
10485                                         dm_new_crtc_state->stream) != DC_OK) {
10486                                 ret = -EINVAL;
10487                                 goto fail;
10488                         }
10489
10490                         *lock_and_validation_needed = true;
10491                 }
10492         }
10493
10494 skip_modeset:
10495         /* Release extra reference */
10496         if (new_stream)
10497                 dc_stream_release(new_stream);
10498
10499         /*
10500          * We want to do dc stream updates that do not require a
10501          * full modeset below.
10502          */
10503         if (!(enable && aconnector && new_crtc_state->active))
10504                 return 0;
10505         /*
10506          * Given the above conditions, the dc state cannot be NULL because:
10507          * 1. We're in the process of enabling CRTCs (the stream has just
10508          *    been added to the dc context, or is already in the context),
10509          * 2. the CRTC has a valid connector attached, and
10510          * 3. the CRTC is currently active and enabled.
10511          * => The dc stream state currently exists.
10512          */
10513         BUG_ON(dm_new_crtc_state->stream == NULL);
10514
10515         /* Scaling or underscan settings */
10516         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10517                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10518                 update_stream_scaling_settings(
10519                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10520
10521         /* ABM settings */
10522         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10523
10524         /*
10525          * Color management settings. We also update color properties
10526          * when a modeset is needed, to ensure it gets reprogrammed.
10527          */
10528         if (dm_new_crtc_state->base.color_mgmt_changed ||
10529             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10530                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10531                 if (ret)
10532                         goto fail;
10533         }
10534
10535         /* Update Freesync settings. */
10536         get_freesync_config_for_crtc(dm_new_crtc_state,
10537                                      dm_new_conn_state);
10538
10539         return ret;
10540
10541 fail:
10542         if (new_stream)
10543                 dc_stream_release(new_stream);
10544         return ret;
10545 }
10546
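      /**
       * should_reset_plane() - Decide whether a plane needs a full reset
       * @state: the DRM atomic state being checked
       * @plane: the plane being updated
       * @old_plane_state: the plane's current state
       * @new_plane_state: the plane's requested state
       *
       * Returns true when the plane must be removed from and re-added to the
       * DC context: on CRTC reassignment, modesets, color-management changes,
       * and on any change on the same CRTC (z-order, scaling, rotation,
       * blending, alpha, colorspace, pixel format, tiling or DCC) that can
       * affect pipe acquisition or bandwidth.
       */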
10547 static bool should_reset_plane(struct drm_atomic_state *state,
10548                                struct drm_plane *plane,
10549                                struct drm_plane_state *old_plane_state,
10550                                struct drm_plane_state *new_plane_state)
10551 {
10552         struct drm_plane *other;
10553         struct drm_plane_state *old_other_state, *new_other_state;
10554         struct drm_crtc_state *new_crtc_state;
10555         int i;
10556
10557         /*
10558          * TODO: Remove this hack once the checks below are sufficient
10559          * to determine when we need to reset all the planes on
10560          * the stream.
10561          */
10562         if (state->allow_modeset)
10563                 return true;
10564
10565         /* Exit early if we know that we're adding or removing the plane. */
10566         if (old_plane_state->crtc != new_plane_state->crtc)
10567                 return true;
10568
10569         /* old crtc == new_crtc == NULL, plane not in context. */
10570         if (!new_plane_state->crtc)
10571                 return false;
10572
10573         new_crtc_state =
10574                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10575
10576         if (!new_crtc_state)
10577                 return true;
10578
10579         /* CRTC Degamma changes currently require us to recreate planes. */
10580         if (new_crtc_state->color_mgmt_changed)
10581                 return true;
10582
10583         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10584                 return true;
10585
10586         /*
10587          * If there are any new primary or overlay planes being added or
10588          * removed then the z-order can potentially change. To ensure
10589          * correct z-order and pipe acquisition the current DC architecture
10590          * requires us to remove and recreate all existing planes.
10591          *
10592          * TODO: Come up with a more elegant solution for this.
10593          */
10594         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10595                 struct amdgpu_framebuffer *old_afb, *new_afb;
10596                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10597                         continue;
10598
10599                 if (old_other_state->crtc != new_plane_state->crtc &&
10600                     new_other_state->crtc != new_plane_state->crtc)
10601                         continue;
10602
10603                 if (old_other_state->crtc != new_other_state->crtc)
10604                         return true;
10605
10606                 /* Src/dst size and scaling updates. */
10607                 if (old_other_state->src_w != new_other_state->src_w ||
10608                     old_other_state->src_h != new_other_state->src_h ||
10609                     old_other_state->crtc_w != new_other_state->crtc_w ||
10610                     old_other_state->crtc_h != new_other_state->crtc_h)
10611                         return true;
10612
10613                 /* Rotation / mirroring updates. */
10614                 if (old_other_state->rotation != new_other_state->rotation)
10615                         return true;
10616
10617                 /* Blending updates. */
10618                 if (old_other_state->pixel_blend_mode !=
10619                     new_other_state->pixel_blend_mode)
10620                         return true;
10621
10622                 /* Alpha updates. */
10623                 if (old_other_state->alpha != new_other_state->alpha)
10624                         return true;
10625
10626                 /* Colorspace changes. */
10627                 if (old_other_state->color_range != new_other_state->color_range ||
10628                     old_other_state->color_encoding != new_other_state->color_encoding)
10629                         return true;
10630
10631                 /* Framebuffer checks fall at the end. */
10632                 if (!old_other_state->fb || !new_other_state->fb)
10633                         continue;
10634
10635                 /* Pixel format changes can require bandwidth updates. */
10636                 if (old_other_state->fb->format != new_other_state->fb->format)
10637                         return true;
10638
10639                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10640                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10641
10642                 /* Tiling and DCC changes also require bandwidth updates. */
10643                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10644                     old_afb->base.modifier != new_afb->base.modifier)
10645                         return true;
10646         }
10647
10648         return false;
10649 }
10650
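      /**
       * dm_check_cursor_fb() - Validate a framebuffer for the cursor plane
       * @new_acrtc: the amdgpu CRTC the cursor is placed on
       * @new_plane_state: the requested cursor plane state
       * @fb: the framebuffer to validate
       *
       * Returns 0 when the FB satisfies the hardware cursor constraints
       * checked below (size within max_cursor_width/height, no cropping,
       * a pitch of 64/128/256 pixels equal to the FB width, and a linear
       * layout), or -EINVAL otherwise.
       */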
10651 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10652                               struct drm_plane_state *new_plane_state,
10653                               struct drm_framebuffer *fb)
10654 {
10655         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10656         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10657         unsigned int pitch;
10658         bool linear;
10659
10660         if (fb->width > new_acrtc->max_cursor_width ||
10661             fb->height > new_acrtc->max_cursor_height) {
10662                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10663                                  fb->width,
10664                                  fb->height);
10665                 return -EINVAL;
10666         }
10667         if (new_plane_state->src_w != fb->width << 16 ||
10668             new_plane_state->src_h != fb->height << 16) {
10669                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10670                 return -EINVAL;
10671         }
10672
10673         /* Pitch in pixels */
10674         pitch = fb->pitches[0] / fb->format->cpp[0];
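              /*
               * Example: a 64x64 ARGB8888 cursor FB has pitches[0] = 256 bytes
               * and cpp[0] = 4 bytes per pixel, giving a pitch of 64 pixels.
               */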
10675
10676         if (fb->width != pitch) {
10677                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10678                                  fb->width, pitch);
10679                 return -EINVAL;
10680         }
10681
10682         switch (pitch) {
10683         case 64:
10684         case 128:
10685         case 256:
10686                 /* FB pitch is supported by cursor plane */
10687                 break;
10688         default:
10689                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10690                 return -EINVAL;
10691         }
10692
10693         /* Core DRM takes care of checking FB modifiers, so we only need to
10694          * check tiling flags when the FB doesn't have a modifier. */
10695         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10696                 if (adev->family < AMDGPU_FAMILY_AI) {
10697                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10698                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10699                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10700                 } else {
10701                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10702                 }
10703                 if (!linear) {
10704                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10705                         return -EINVAL;
10706                 }
10707         }
10708
10709         return 0;
10710 }
10711
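      /**
       * dm_update_plane_state() - Sync the DC plane state with a DRM plane
       * @dc: the DC being configured
       * @state: the DRM atomic state
       * @plane: the DRM plane being checked
       * @old_plane_state: the plane's current state
       * @new_plane_state: the plane's requested state
       * @enable: false to remove changed/disabled planes from the DC context,
       *          true to (re)create and add them
       * @lock_and_validation_needed: set to true when the change requires
       *          global DC validation
       *
       * Called twice from atomic check, once per direction, mirroring the
       * remove-then-add ordering used at commit time.
       */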
10712 static int dm_update_plane_state(struct dc *dc,
10713                                  struct drm_atomic_state *state,
10714                                  struct drm_plane *plane,
10715                                  struct drm_plane_state *old_plane_state,
10716                                  struct drm_plane_state *new_plane_state,
10717                                  bool enable,
10718                                  bool *lock_and_validation_needed)
10719 {
10720
10721         struct dm_atomic_state *dm_state = NULL;
10722         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10723         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10724         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10725         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10726         struct amdgpu_crtc *new_acrtc;
10727         bool needs_reset;
10728         int ret = 0;
10729
10731         new_plane_crtc = new_plane_state->crtc;
10732         old_plane_crtc = old_plane_state->crtc;
10733         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10734         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10735
10736         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10737                 if (!enable || !new_plane_crtc ||
10738                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10739                         return 0;
10740
10741                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10742
10743                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10744                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10745                         return -EINVAL;
10746                 }
10747
10748                 if (new_plane_state->fb) {
10749                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10750                                                  new_plane_state->fb);
10751                         if (ret)
10752                                 return ret;
10753                 }
10754
10755                 return 0;
10756         }
10757
10758         needs_reset = should_reset_plane(state, plane, old_plane_state,
10759                                          new_plane_state);
10760
10761         /* Remove any changed/removed planes */
10762         if (!enable) {
10763                 if (!needs_reset)
10764                         return 0;
10765
10766                 if (!old_plane_crtc)
10767                         return 0;
10768
10769                 old_crtc_state = drm_atomic_get_old_crtc_state(
10770                                 state, old_plane_crtc);
10771                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10772
10773                 if (!dm_old_crtc_state->stream)
10774                         return 0;
10775
10776                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10777                                 plane->base.id, old_plane_crtc->base.id);
10778
10779                 ret = dm_atomic_get_state(state, &dm_state);
10780                 if (ret)
10781                         return ret;
10782
10783                 if (!dc_remove_plane_from_context(
10784                                 dc,
10785                                 dm_old_crtc_state->stream,
10786                                 dm_old_plane_state->dc_state,
10787                                 dm_state->context)) {
10789                         return -EINVAL;
10790                 }
10791
10793                 dc_plane_state_release(dm_old_plane_state->dc_state);
10794                 dm_new_plane_state->dc_state = NULL;
10795
10796                 *lock_and_validation_needed = true;
10797
10798         } else { /* Add new planes */
10799                 struct dc_plane_state *dc_new_plane_state;
10800
10801                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10802                         return 0;
10803
10804                 if (!new_plane_crtc)
10805                         return 0;
10806
10807                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10808                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10809
10810                 if (!dm_new_crtc_state->stream)
10811                         return 0;
10812
10813                 if (!needs_reset)
10814                         return 0;
10815
10816                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10817                 if (ret)
10818                         return ret;
10819
10820                 WARN_ON(dm_new_plane_state->dc_state);
10821
10822                 dc_new_plane_state = dc_create_plane_state(dc);
10823                 if (!dc_new_plane_state)
10824                         return -ENOMEM;
10825
10826                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10827                                  plane->base.id, new_plane_crtc->base.id);
10828
10829                 ret = fill_dc_plane_attributes(
10830                         drm_to_adev(new_plane_crtc->dev),
10831                         dc_new_plane_state,
10832                         new_plane_state,
10833                         new_crtc_state);
10834                 if (ret) {
10835                         dc_plane_state_release(dc_new_plane_state);
10836                         return ret;
10837                 }
10838
10839                 ret = dm_atomic_get_state(state, &dm_state);
10840                 if (ret) {
10841                         dc_plane_state_release(dc_new_plane_state);
10842                         return ret;
10843                 }
10844
10845                 /*
10846                  * Any atomic check errors that occur after this will
10847                  * not need a release. The plane state will be attached
10848                  * to the stream, and therefore part of the atomic
10849                  * state. It'll be released when the atomic state is
10850                  * cleaned.
10851                  */
10852                 if (!dc_add_plane_to_context(
10853                                 dc,
10854                                 dm_new_crtc_state->stream,
10855                                 dc_new_plane_state,
10856                                 dm_state->context)) {
10858                         dc_plane_state_release(dc_new_plane_state);
10859                         return -EINVAL;
10860                 }
10861
10862                 dm_new_plane_state->dc_state = dc_new_plane_state;
10863
10864                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10865
10866                 /* Tell DC to do a full surface update every time there
10867                  * is a plane change. Inefficient, but works for now.
10868                  */
10869                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10870
10871                 *lock_and_validation_needed = true;
10872         }
10873
10875         return ret;
10876 }
10877
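      /*
       * Return the plane's source size in whole pixels, taking the plane
       * rotation into account: src_w/src_h are 16.16 fixed point, and a
       * 90/270 degree rotation swaps width and height.
       */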
10878 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10879                                        int *src_w, int *src_h)
10880 {
10881         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10882         case DRM_MODE_ROTATE_90:
10883         case DRM_MODE_ROTATE_270:
10884                 *src_w = plane_state->src_h >> 16;
10885                 *src_h = plane_state->src_w >> 16;
10886                 break;
10887         case DRM_MODE_ROTATE_0:
10888         case DRM_MODE_ROTATE_180:
10889         default:
10890                 *src_w = plane_state->src_w >> 16;
10891                 *src_h = plane_state->src_h >> 16;
10892                 break;
10893         }
10894 }
10895
10896 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10897                                 struct drm_crtc *crtc,
10898                                 struct drm_crtc_state *new_crtc_state)
10899 {
10900         struct drm_plane *cursor = crtc->cursor, *underlying;
10901         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10902         int i;
10903         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10904         int cursor_src_w, cursor_src_h;
10905         int underlying_src_w, underlying_src_h;
10906
10907         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10908          * cursor per pipe, but it inherits the scaling and positioning from
10909          * the underlying pipe. Check that the cursor plane's scaling matches
10910          * the underlying planes'. */
10911
10912         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10913         if (!new_cursor_state || !new_cursor_state->fb) {
10914                 return 0;
10915         }
10916
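              /*
               * Scale factors are compared as integers in units of 1/1000 to
               * avoid floating point in the kernel; e.g. a 64x64 cursor shown
               * at 64x64 yields a scale of 1000 (1.0x) in both dimensions.
               */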
10917         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10918         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10919         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10920
10921         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10922                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10923                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10924                         continue;
10925
10926                 /* Ignore disabled planes */
10927                 if (!new_underlying_state->fb)
10928                         continue;
10929
10930                 dm_get_oriented_plane_size(new_underlying_state,
10931                                            &underlying_src_w, &underlying_src_h);
10932                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10933                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10934
10935                 if (cursor_scale_w != underlying_scale_w ||
10936                     cursor_scale_h != underlying_scale_h) {
10937                         drm_dbg_atomic(crtc->dev,
10938                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10939                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10940                         return -EINVAL;
10941                 }
10942
10943                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10944                 if (new_underlying_state->crtc_x <= 0 &&
10945                     new_underlying_state->crtc_y <= 0 &&
10946                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10947                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10948                         break;
10949         }
10950
10951         return 0;
10952 }
10953
10954 #if defined(CONFIG_DRM_AMD_DC_DCN)
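      /*
       * When DSC is enabled on an MST link, a mode change on one CRTC can
       * alter the DSC and bandwidth allocation of every stream sharing the
       * same MST topology, so the affected sibling CRTCs are pulled into the
       * atomic state and revalidated together.
       */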
10955 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10956 {
10957         struct drm_connector *connector;
10958         struct drm_connector_state *conn_state, *old_conn_state;
10959         struct amdgpu_dm_connector *aconnector = NULL;
10960         int i;
10961         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10962                 if (!conn_state->crtc)
10963                         conn_state = old_conn_state;
10964
10965                 if (conn_state->crtc != crtc)
10966                         continue;
10967
10968                 aconnector = to_amdgpu_dm_connector(connector);
10969                 if (!aconnector->port || !aconnector->mst_port)
10970                         aconnector = NULL;
10971                 else
10972                         break;
10973         }
10974
10975         if (!aconnector)
10976                 return 0;
10977
10978         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10979 }
10980 #endif
10981
10982 /**
10983  * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
10984  * @dev: The DRM device
10985  * @state: The atomic state to commit
10986  *
10987  * Validate that the given atomic state is programmable by DC into hardware.
10988  * This involves constructing a &struct dc_state reflecting the new hardware
10989  * state we wish to commit, then querying DC to see if it is programmable. It's
10990  * important not to modify the existing DC state. Otherwise, atomic_check
10991  * may unexpectedly commit hardware changes.
10992  *
10993  * When validating the DC state, it's important that the right locks are
10994  * acquired. For the full-update case, which removes/adds/updates streams on
10995  * one CRTC while flipping on another CRTC, acquiring the global lock guarantees
10996  * that any such full-update commit waits for completion of any outstanding
10997  * flip, using DRM's synchronization events.
10998  *
10999  * Note that DM adds the affected connectors for all CRTCs in state, even when that
11000  * might not seem necessary. This is because DC stream creation requires the
11001  * DC sink, which is tied to the DRM connector state. Cleaning this up should
11002  * be possible but non-trivial - a possible TODO item.
11003  *
11004  * Return: 0 on success, a negative error code on failure.
11005  */
11006 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11007                                   struct drm_atomic_state *state)
11008 {
11009         struct amdgpu_device *adev = drm_to_adev(dev);
11010         struct dm_atomic_state *dm_state = NULL;
11011         struct dc *dc = adev->dm.dc;
11012         struct drm_connector *connector;
11013         struct drm_connector_state *old_con_state, *new_con_state;
11014         struct drm_crtc *crtc;
11015         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11016         struct drm_plane *plane;
11017         struct drm_plane_state *old_plane_state, *new_plane_state;
11018         enum dc_status status;
11019         int ret, i;
11020         bool lock_and_validation_needed = false;
11021         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11022 #if defined(CONFIG_DRM_AMD_DC_DCN)
11023         struct dsc_mst_fairness_vars vars[MAX_PIPES];
11024         struct drm_dp_mst_topology_state *mst_state;
11025         struct drm_dp_mst_topology_mgr *mgr;
11026 #endif
11027
11028         trace_amdgpu_dm_atomic_check_begin(state);
11029
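              /*
               * High-level flow of the checks below: run the core DRM modeset
               * checks first; then remove changed planes and disable changed
               * CRTCs (streams); then re-enable CRTCs and re-add planes,
               * mirroring the ordering used at commit time; finally, when a
               * bandwidth-affecting (full) update was detected, validate the
               * resulting DC state globally.
               */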
11030         ret = drm_atomic_helper_check_modeset(dev, state);
11031         if (ret) {
11032                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11033                 goto fail;
11034         }
11035
11036         /* Check connector changes */
11037         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11038                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11039                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11040
11041                 /* Skip connectors that are disabled or part of modeset already. */
11042                 if (!old_con_state->crtc && !new_con_state->crtc)
11043                         continue;
11044
11045                 if (!new_con_state->crtc)
11046                         continue;
11047
11048                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11049                 if (IS_ERR(new_crtc_state)) {
11050                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11051                         ret = PTR_ERR(new_crtc_state);
11052                         goto fail;
11053                 }
11054
11055                 if (dm_old_con_state->abm_level !=
11056                     dm_new_con_state->abm_level)
11057                         new_crtc_state->connectors_changed = true;
11058         }
11059
11060 #if defined(CONFIG_DRM_AMD_DC_DCN)
11061         if (dc_resource_is_dsc_encoding_supported(dc)) {
11062                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11063                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11064                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
11065                                 if (ret) {
11066                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11067                                         goto fail;
11068                                 }
11069                         }
11070                 }
11071                 pre_validate_dsc(state, &dm_state, vars);
11072         }
11073 #endif
11074         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11075                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11076
11077                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11078                     !new_crtc_state->color_mgmt_changed &&
11079                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11080                     !dm_old_crtc_state->dsc_force_changed)
11081                         continue;
11082
11083                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11084                 if (ret) {
11085                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11086                         goto fail;
11087                 }
11088
11089                 if (!new_crtc_state->enable)
11090                         continue;
11091
11092                 ret = drm_atomic_add_affected_connectors(state, crtc);
11093                 if (ret) {
11094                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11095                         goto fail;
11096                 }
11097
11098                 ret = drm_atomic_add_affected_planes(state, crtc);
11099                 if (ret) {
11100                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11101                         goto fail;
11102                 }
11103
11104                 if (dm_old_crtc_state->dsc_force_changed)
11105                         new_crtc_state->mode_changed = true;
11106         }
11107
11108         /*
11109          * Add all primary and overlay planes on the CRTC to the state
11110          * whenever a plane is enabled to maintain correct z-ordering
11111          * and to enable fast surface updates.
11112          */
11113         drm_for_each_crtc(crtc, dev) {
11114                 bool modified = false;
11115
11116                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11117                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11118                                 continue;
11119
11120                         if (new_plane_state->crtc == crtc ||
11121                             old_plane_state->crtc == crtc) {
11122                                 modified = true;
11123                                 break;
11124                         }
11125                 }
11126
11127                 if (!modified)
11128                         continue;
11129
11130                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11131                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11132                                 continue;
11133
11134                         new_plane_state =
11135                                 drm_atomic_get_plane_state(state, plane);
11136
11137                         if (IS_ERR(new_plane_state)) {
11138                                 ret = PTR_ERR(new_plane_state);
11139                                 DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
11140                                 goto fail;
11141                         }
11142                 }
11143         }
11144
11145         /* Remove existing planes if they are modified */
11146         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11147                 ret = dm_update_plane_state(dc, state, plane,
11148                                             old_plane_state,
11149                                             new_plane_state,
11150                                             false,
11151                                             &lock_and_validation_needed);
11152                 if (ret) {
11153                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11154                         goto fail;
11155                 }
11156         }
11157
11158         /* Disable all crtcs which require disable */
11159         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11160                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11161                                            old_crtc_state,
11162                                            new_crtc_state,
11163                                            false,
11164                                            &lock_and_validation_needed);
11165                 if (ret) {
11166                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11167                         goto fail;
11168                 }
11169         }
11170
11171         /* Enable all crtcs which require enable */
11172         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11173                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11174                                            old_crtc_state,
11175                                            new_crtc_state,
11176                                            true,
11177                                            &lock_and_validation_needed);
11178                 if (ret) {
11179                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11180                         goto fail;
11181                 }
11182         }
11183
11184         /* Add new/modified planes */
11185         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11186                 ret = dm_update_plane_state(dc, state, plane,
11187                                             old_plane_state,
11188                                             new_plane_state,
11189                                             true,
11190                                             &lock_and_validation_needed);
11191                 if (ret) {
11192                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11193                         goto fail;
11194                 }
11195         }
11196
11197         /* Run this here since we want to validate the streams we created */
11198         ret = drm_atomic_helper_check_planes(dev, state);
11199         if (ret) {
11200                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11201                 goto fail;
11202         }
11203
11204         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11205                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11206                 if (dm_new_crtc_state->mpo_requested)
11207                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11208         }
11209
11210         /* Check cursor planes scaling */
11211         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11212                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11213                 if (ret) {
11214                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11215                         goto fail;
11216                 }
11217         }
11218
11219         if (state->legacy_cursor_update) {
11220                 /*
11221                  * This is a fast cursor update coming from the plane update
11222                  * helper, check if it can be done asynchronously for better
11223                  * performance.
11224                  */
11225                 state->async_update =
11226                         !drm_atomic_helper_async_check(dev, state);
11227
11228                 /*
11229                  * Skip the remaining global validation if this is an async
11230                  * update. Cursor updates can be done without affecting
11231                  * state or bandwidth calcs and this avoids the performance
11232                  * penalty of locking the private state object and
11233                  * allocating a new dc_state.
11234                  */
11235                 if (state->async_update)
11236                         return 0;
11237         }
11238
11239         /* Check scaling and underscan changes */
11240         /* TODO: Scaling-change validation was removed because a new stream
11241          * cannot be committed into the context without causing a full reset.
11242          * Need to decide how to handle this.
11243          */
11244         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11245                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11246                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11247                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11248
11249                 /* Skip any modesets/resets */
11250                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11251                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11252                         continue;
11253
11254                 /* Skip anything that is not a scaling or underscan change */
11255                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11256                         continue;
11257
11258                 lock_and_validation_needed = true;
11259         }
11260
11261 #if defined(CONFIG_DRM_AMD_DC_DCN)
11262         /* set the slot info for each mst_state based on the link encoding format */
11263         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11264                 struct amdgpu_dm_connector *aconnector;
11265                 struct drm_connector *connector;
11266                 struct drm_connector_list_iter iter;
11267                 u8 link_coding_cap;
11268
11269                 if (!mgr->mst_state)
11270                         continue;
11271
11272                 drm_connector_list_iter_begin(dev, &iter);
11273                 drm_for_each_connector_iter(connector, &iter) {
11274                         int id = connector->index;
11275
11276                         if (id == mst_state->mgr->conn_base_id) {
11277                                 aconnector = to_amdgpu_dm_connector(connector);
11278                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11279                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11280
11281                                 break;
11282                         }
11283                 }
11284                 drm_connector_list_iter_end(&iter);
11285
11286         }
11287 #endif
11288         /*
11289          * Streams and planes are reset when there are changes that affect
11290          * bandwidth. Anything that affects bandwidth needs to go through
11291          * DC global validation to ensure that the configuration can be applied
11292          * to hardware.
11293          *
11294          * We currently have to stall out here in atomic_check for outstanding
11295          * commits to finish in this case, because our IRQ handlers reference
11296          * DRM state directly - we can end up disabling interrupts too early
11297          * if we don't.
11298          *
11299          * TODO: Remove this stall and drop DM state private objects.
11300          */
11301         if (lock_and_validation_needed) {
11302                 ret = dm_atomic_get_state(state, &dm_state);
11303                 if (ret) {
11304                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11305                         goto fail;
11306                 }
11307
11308                 ret = do_aquire_global_lock(dev, state);
11309                 if (ret) {
11310                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11311                         goto fail;
11312                 }
11313
11314 #if defined(CONFIG_DRM_AMD_DC_DCN)
11315                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11316                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                              ret = -EINVAL;
11317                         goto fail;
11318                 }
11319
11320                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11321                 if (ret) {
11322                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11323                         goto fail;
11324                 }
11325 #endif
11326
11327                 /*
11328                  * Perform validation of MST topology in the state:
11329                  * We need to perform MST atomic check before calling
11330                  * dc_validate_global_state(), or there is a chance
11331                  * to get stuck in an infinite loop and hang eventually.
11332                  */
11333                 ret = drm_dp_mst_atomic_check(state);
11334                 if (ret) {
11335                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11336                         goto fail;
11337                 }
11338                 status = dc_validate_global_state(dc, dm_state->context, true);
11339                 if (status != DC_OK) {
11340                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11341                                        dc_status_to_str(status), status);
11342                         ret = -EINVAL;
11343                         goto fail;
11344                 }
11345         } else {
11346                 /*
11347                  * The commit is a fast update. Fast updates shouldn't change
11348                  * the DC context, affect global validation, and can have their
11349                  * commit work done in parallel with other commits not touching
11350                  * the same resource. If we have a new DC context as part of
11351                  * the DM atomic state from validation we need to free it and
11352                  * retain the existing one instead.
11353                  *
11354                  * Furthermore, since the DM atomic state only contains the DC
11355                  * context and can safely be annulled, we can free the state
11356                  * and clear the associated private object now to free
11357                  * some memory and avoid a possible use-after-free later.
11358                  */
11359
11360                 for (i = 0; i < state->num_private_objs; i++) {
11361                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11362
11363                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11364                                 int j = state->num_private_objs-1;
11365
11366                                 dm_atomic_destroy_state(obj,
11367                                                 state->private_objs[i].state);
11368
11369                                 /* If i is not the last element, move the last
11370                                  * element into slot i so the array can safely
11371                                  * be truncated.
11372                                  */
11373                                 if (i != j)
11374                                         state->private_objs[i] =
11375                                                 state->private_objs[j];
11376
11377                                 state->private_objs[j].ptr = NULL;
11378                                 state->private_objs[j].state = NULL;
11379                                 state->private_objs[j].old_state = NULL;
11380                                 state->private_objs[j].new_state = NULL;
11381
11382                                 state->num_private_objs = j;
11383                                 break;
11384                         }
11385                 }
11386         }
11387
11388         /* Store the overall update type for use later in atomic check. */
11389         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11390                 struct dm_crtc_state *dm_new_crtc_state =
11391                         to_dm_crtc_state(new_crtc_state);
11392
11393                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11394                                                          UPDATE_TYPE_FULL :
11395                                                          UPDATE_TYPE_FAST;
11396         }
11397
11398         /* Must be success */
11399         WARN_ON(ret);
11400
11401         trace_amdgpu_dm_atomic_check_finish(state, ret);
11402
11403         return ret;
11404
11405 fail:
11406         if (ret == -EDEADLK)
11407                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11408         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11409                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11410         else
11411                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11412
11413         trace_amdgpu_dm_atomic_check_finish(state, ret);
11414
11415         return ret;
11416 }
11417
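      /*
       * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report
       * whether the DP_MSA_TIMING_PAR_IGNORED bit is set, i.e. whether the
       * sink can ignore the MSA timing parameters and follow the actual link
       * timing. This is used below to decide whether the EDID must be
       * checked for a FreeSync range descriptor.
       */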
11418 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11419                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11420 {
11421         uint8_t dpcd_data;
11422         bool capable = false;
11423
11424         if (amdgpu_dm_connector->dc_link &&
11425                 dm_helpers_dp_read_dpcd(
11426                                 NULL,
11427                                 amdgpu_dm_connector->dc_link,
11428                                 DP_DOWN_STREAM_PORT_COUNT,
11429                                 &dpcd_data,
11430                                 sizeof(dpcd_data))) {
11431                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11432         }
11433
11434         return capable;
11435 }
11436
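      /*
       * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
       * extension block to the DMUB firmware for parsing. DMUB replies with
       * either an ACK for the chunk or, once the whole block has been
       * received, the parsed AMD VSDB (FreeSync support plus the min/max
       * frame rates).
       */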
11437 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11438                 unsigned int offset,
11439                 unsigned int total_length,
11440                 uint8_t *data,
11441                 unsigned int length,
11442                 struct amdgpu_hdmi_vsdb_info *vsdb)
11443 {
11444         bool res;
11445         union dmub_rb_cmd cmd;
11446         struct dmub_cmd_send_edid_cea *input;
11447         struct dmub_cmd_edid_cea_output *output;
11448
11449         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11450                 return false;
11451
11452         memset(&cmd, 0, sizeof(cmd));
11453
11454         input = &cmd.edid_cea.data.input;
11455
11456         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11457         cmd.edid_cea.header.sub_type = 0;
11458         cmd.edid_cea.header.payload_bytes =
11459                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11460         input->offset = offset;
11461         input->length = length;
11462         input->cea_total_length = total_length;
11463         memcpy(input->payload, data, length);
11464
11465         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11466         if (!res) {
11467                 DRM_ERROR("EDID CEA parser failed\n");
11468                 return false;
11469         }
11470
11471         output = &cmd.edid_cea.data.output;
11472
11473         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11474                 if (!output->ack.success) {
11475                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11476                                         output->ack.offset);
11477                 }
11478         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11479                 if (!output->amd_vsdb.vsdb_found)
11480                         return false;
11481
11482                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11483                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11484                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11485                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11486         } else {
11487                 DRM_WARN("Unknown EDID CEA parser results\n");
11488                 return false;
11489         }
11490
11491         return true;
11492 }
11493
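      /*
       * Legacy DMCU path: stream the CEA extension block to the DMCU
       * firmware 8 bytes at a time, checking for an ACK after every chunk
       * and reading back the AMD VSDB result after the final one.
       */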
11494 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11495                 uint8_t *edid_ext, int len,
11496                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11497 {
11498         int i;
11499
11500         /* send extension block to DMCU for parsing */
11501         for (i = 0; i < len; i += 8) {
11502                 bool res;
11503                 int offset;
11504
11505                 /* send 8 bytes at a time */
11506                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11507                         return false;
11508
11509                 if (i + 8 == len) {
11510                         /* EDID block transfer complete; expect the result */
11511                         int version, min_rate, max_rate;
11512
11513                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11514                         if (res) {
11515                                 /* amd vsdb found */
11516                                 vsdb_info->freesync_supported = 1;
11517                                 vsdb_info->amd_vsdb_version = version;
11518                                 vsdb_info->min_refresh_rate_hz = min_rate;
11519                                 vsdb_info->max_refresh_rate_hz = max_rate;
11520                                 return true;
11521                         }
11522                         /* not amd vsdb */
11523                         return false;
11524                 }
11525
11526                 /* check for ack */
11527                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11528                 if (!res)
11529                         return false;
11530         }
11531
11532         return false;
11533 }
11534
11535 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11536                 uint8_t *edid_ext, int len,
11537                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11538 {
11539         int i;
11540
11541         /* send extension block to DMUB for parsing */
11542         for (i = 0; i < len; i += 8) {
11543                 /* send 8 bytes at a time */
11544                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11545                         return false;
11546         }
11547
11548         return vsdb_info->freesync_supported;
11549 }
11550
11551 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11552                 uint8_t *edid_ext, int len,
11553                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11554 {
11555         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11556
11557         if (adev->dm.dmub_srv)
11558                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11559         else
11560                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11561 }
11562
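      /*
       * Find the CEA-861 extension block in the EDID and hand it to the
       * firmware parser to look for an AMD VSDB. Returns the index of the
       * CEA extension on success, or -ENODEV when there is no EDID, no CEA
       * extension or no valid VSDB.
       */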
11563 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11564                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11565 {
11566         uint8_t *edid_ext = NULL;
11567         int i;
11568         bool valid_vsdb_found = false;
11569
11570         /*----- drm_find_cea_extension() -----*/
11571         /* No EDID or EDID extensions */
11572         if (edid == NULL || edid->extensions == 0)
11573                 return -ENODEV;
11574
11575         /* Find CEA extension */
11576         for (i = 0; i < edid->extensions; i++) {
11577                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11578                 if (edid_ext[0] == CEA_EXT)
11579                         break;
11580         }
11581
11582         if (i == edid->extensions)
11583                 return -ENODEV;
11584
11585         /*----- cea_db_offsets() -----*/
11586         if (edid_ext[0] != CEA_EXT)
11587                 return -ENODEV;
11588
11589         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11590
11591         return valid_vsdb_found ? i : -ENODEV;
11592 }
11593
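      /**
       * amdgpu_dm_update_freesync_caps - Update the connector's FreeSync state
       * @connector: the DRM connector to update
       * @edid: the EDID to parse, may be NULL
       *
       * Derive the supported refresh-rate range from the EDID monitor range
       * descriptor (DP/eDP) or from the AMD HDMI VSDB (HDMI), cache it on the
       * connector, and update the connector's "vrr_capable" property. A range
       * wider than 10 Hz is required for the display to be considered
       * FreeSync capable.
       */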
11594 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11595                                         struct edid *edid)
11596 {
11597         int i = 0;
11598         struct detailed_timing *timing;
11599         struct detailed_non_pixel *data;
11600         struct detailed_data_monitor_range *range;
11601         struct amdgpu_dm_connector *amdgpu_dm_connector =
11602                         to_amdgpu_dm_connector(connector);
11603         struct dm_connector_state *dm_con_state = NULL;
11604         struct dc_sink *sink;
11605
11606         struct drm_device *dev = connector->dev;
11607         struct amdgpu_device *adev = drm_to_adev(dev);
11608         bool freesync_capable = false;
11609         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11610
11611         if (!connector->state) {
11612                 DRM_ERROR("%s - Connector has no state\n", __func__);
11613                 goto update;
11614         }
11615
11616         sink = amdgpu_dm_connector->dc_sink ?
11617                 amdgpu_dm_connector->dc_sink :
11618                 amdgpu_dm_connector->dc_em_sink;
11619
11620         if (!edid || !sink) {
11621                 dm_con_state = to_dm_connector_state(connector->state);
11622
11623                 amdgpu_dm_connector->min_vfreq = 0;
11624                 amdgpu_dm_connector->max_vfreq = 0;
11625                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11626                 connector->display_info.monitor_range.min_vfreq = 0;
11627                 connector->display_info.monitor_range.max_vfreq = 0;
11628                 freesync_capable = false;
11629
11630                 goto update;
11631         }
11632
11633         dm_con_state = to_dm_connector_state(connector->state);
11634
11635         if (!adev->dm.freesync_module)
11636                 goto update;
11637
11639         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
11640             sink->sink_signal == SIGNAL_TYPE_EDP) {
11641                 bool edid_check_required = false;
11642
11643                 if (edid) {
11644                         edid_check_required = is_dp_capable_without_timing_msa(
11645                                                 adev->dm.dc,
11646                                                 amdgpu_dm_connector);
11647                 }
11648
11649                 if (edid_check_required && (edid->version > 1 ||
11650                     (edid->version == 1 && edid->revision > 1))) {
11651                         for (i = 0; i < 4; i++) {
11653                                 timing  = &edid->detailed_timings[i];
11654                                 data    = &timing->data.other_data;
11655                                 range   = &data->data.range;
11656                                 /*
11657                                  * Only monitor range descriptors signal continuous frequency mode
11658                                  */
11659                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11660                                         continue;
11661                                 /*
11662                                  * Accept only "range limits only" descriptors:
11663                                  * flags == 1 means no additional timing
11664                                  * information is provided. Default GTF,
11665                                  * secondary GTF curve and CVT are not supported.
11666                                  */
11667                                 if (range->flags != 1)
11668                                         continue;
11669
11670                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11671                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11672                                 amdgpu_dm_connector->pixel_clock_mhz =
11673                                         range->pixel_clock_mhz * 10;
11674
11675                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11676                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11677
11678                                 break;
11679                         }
11680
11681                         if (amdgpu_dm_connector->max_vfreq -
11682                             amdgpu_dm_connector->min_vfreq > 10) {
11684                                 freesync_capable = true;
11685                         }
11686                 }
11687         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11688                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11689                 if (i >= 0 && vsdb_info.freesync_supported) {
11690                         /* Refresh limits come from the AMD VSDB rather
11691                          * than from the EDID detailed timings */
11692
11693                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11694                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11695                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11696                                 freesync_capable = true;
11697
11698                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11699                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11700                 }
11701         }
11702
11703 update:
11704         if (dm_con_state)
11705                 dm_con_state->freesync_capable = freesync_capable;
11706
11707         if (connector->vrr_capable_property)
11708                 drm_connector_set_vrr_capable_property(connector,
11709                                                        freesync_capable);
11710 }
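
/*
 * Illustrative sketch, not part of the driver: both branches above apply the
 * same FreeSync rule, "the sink advertises a usable refresh window". The
 * hypothetical helper below restates that rule in isolation.
 */
#if 0
static bool example_vrr_window_usable(unsigned int min_vfreq,
                                      unsigned int max_vfreq)
{
        /* More than a 10 Hz span between min and max refresh is required */
        return max_vfreq > min_vfreq && max_vfreq - min_vfreq > 10;
}
#endif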
11711
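/*
 * Propagate adev->dm.force_timing_sync to every currently committed stream
 * and retrigger CRTC synchronization under the DC lock.
 */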
11712 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11713 {
11714         struct amdgpu_device *adev = drm_to_adev(dev);
11715         struct dc *dc = adev->dm.dc;
11716         int i;
11717
11718         mutex_lock(&adev->dm.dc_lock);
11719         if (dc->current_state) {
11720                 for (i = 0; i < dc->current_state->stream_count; ++i)
11721                         dc->current_state->streams[i]
11722                                 ->triggered_crtc_reset.enabled =
11723                                 adev->dm.force_timing_sync;
11724
11725                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11726                 dc_trigger_sync(dc, dc->current_state);
11727         }
11728         mutex_unlock(&adev->dm.dc_lock);
11729 }
11730
11731 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11732                        uint32_t value, const char *func_name)
11733 {
11734 #ifdef DM_CHECK_ADDR_0
11735         if (address == 0) {
11736                 DC_ERR("invalid register write. address = 0\n");
11737                 return;
11738         }
11739 #endif
11740         cgs_write_register(ctx->cgs_device, address, value);
11741         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11742 }
11743
11744 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11745                           const char *func_name)
11746 {
11747         uint32_t value;
11748 #ifdef DM_CHECK_ADDR_0
11749         if (address == 0) {
11750                 DC_ERR("invalid register read; address = 0\n");
11751                 return 0;
11752         }
11753 #endif
11754
11755         if (ctx->dmub_srv &&
11756             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11757             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11758                 ASSERT(false);
11759                 return 0;
11760         }
11761
11762         value = cgs_read_register(ctx->cgs_device, address);
11763
11764         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11765
11766         return value;
11767 }
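
/*
 * Illustrative sketch, not part of the driver: a read-modify-write through
 * the two register hooks above. With DM_CHECK_ADDR_0 defined, both the read
 * and the write leg are guarded against a zero address. example_rmw_reg() is
 * hypothetical.
 */
#if 0
static void example_rmw_reg(const struct dc_context *ctx, uint32_t address,
                            uint32_t mask, uint32_t bits)
{
        uint32_t v = dm_read_reg_func(ctx, address, __func__);

        /* Clear the masked field, then set the requested bits within it */
        v = (v & ~mask) | (bits & mask);
        dm_write_reg_func(ctx, address, v, __func__);
}
#endif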
11768
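/*
 * Translate the DMUB_ASYNC_TO_SYNC_ACCESS_* status of an AUX or SET_CONFIG
 * transfer into the corresponding AUX_RET_* / SET_CONFIG_* operation result.
 *
 * Returns the AUX reply length for a successful AUX transfer, 0 for a
 * successful SET_CONFIG, and -1 on any failure (the detailed error is
 * reported through @operation_result).
 */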
11769 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11770                                                 struct dc_context *ctx,
11771                                                 uint8_t status_type,
11772                                                 uint32_t *operation_result)
11773 {
11774         struct amdgpu_device *adev = ctx->driver_context;
11775         int return_status = -1;
11776         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11777
11778         if (is_cmd_aux) {
11779                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11780                         return_status = p_notify->aux_reply.length;
11781                         *operation_result = p_notify->result;
11782                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11783                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11784                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11785                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11786                 } else {
11787                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11788                 }
11789         } else {
11790                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11791                         return_status = 0;
11792                         *operation_result = p_notify->sc_status;
11793                 } else {
11794                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11795                 }
11796         }
11797
11798         return return_status;
11799 }
11800
11801 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11802         unsigned int link_index, void *cmd_payload, void *operation_result)
11803 {
11804         struct amdgpu_device *adev = ctx->driver_context;
11805         int ret = 0;
11806
11807         if (is_cmd_aux) {
11808                 dc_process_dmub_aux_transfer_async(ctx->dc,
11809                         link_index, (struct aux_payload *)cmd_payload);
11810         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11811                                         (struct set_config_cmd_payload *)cmd_payload,
11812                                         adev->dm.dmub_notify)) {
11813                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11814                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11815                                         (uint32_t *)operation_result);
11816         }
11817
11818         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11819         if (ret == 0) {
11820                 DRM_ERROR("wait_for_completion_timeout() timed out!\n");
11821                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11822                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11823                                 (uint32_t *)operation_result);
11824         }
11825
11826         if (is_cmd_aux) {
11827                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11828                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11829
11830                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11831                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11832                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11833                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11834                                        adev->dm.dmub_notify->aux_reply.length);
11835                         }
11836                 }
11837         }
11838
11839         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11840                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11841                         (uint32_t *)operation_result);
11842 }
11843
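/*
 * Illustrative sketch, not part of the driver: the async-to-sync bridge used
 * above fires a request asynchronously, then blocks on a completion that the
 * DMUB notify handler signals; 10 * HZ mirrors the timeout chosen above.
 * example_wait_dmub() is hypothetical.
 */
#if 0
static int example_wait_dmub(struct completion *transfer_done)
{
        if (!wait_for_completion_timeout(transfer_done, 10 * HZ))
                return -ETIMEDOUT;
        return 0;
}
#endif
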
11844 /*
11845  * Check whether seamless boot is supported.
11846  *
11847  * So far we only support seamless boot on CHIP_VANGOGH.
11848  * If everything goes well, we may consider expanding
11849  * seamless boot to other ASICs.
11850  */
11851 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11852 {
11853         switch (adev->asic_type) {
11854         case CHIP_VANGOGH:
11855                 if (!adev->mman.keep_stolen_vga_memory)
11856                         return true;
11857                 break;
11858         default:
11859                 break;
11860         }
11861
11862         return false;
11863 }
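
/*
 * Illustrative sketch, not part of the driver: callers would gate any
 * seamless-boot specific setup on the check above. The helper below is
 * hypothetical usage only.
 */
#if 0
static void example_report_seamless_boot(struct amdgpu_device *adev)
{
        if (check_seamless_boot_capability(adev))
                DRM_INFO("seamless boot supported; keeping firmware-lit display\n");
}
#endif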