/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
92 #include "soc15_hw_ip.h"
93 #include "soc15_common.h"
94 #include "vega10_ip_offset.h"
95
96 #include "soc15_common.h"
97
98 #include "gc/gc_11_0_0_offset.h"
99 #include "gc/gc_11_0_0_sh_mask.h"
100
101 #include "modules/inc/mod_freesync.h"
102 #include "modules/power/power_helpers.h"
103 #include "modules/inc/mod_info_packet.h"
104
105 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
107 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
109 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
111 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
113 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
115 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
117 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
119 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
121 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
122 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
123 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
124 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
125 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
126 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
127
128 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
129 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
130 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
131 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
132
133 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
134 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
135
136 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
137 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
138
139 /* Number of bytes in PSP header for firmware. */
140 #define PSP_HEADER_BYTES 0x100
141
142 /* Number of bytes in PSP footer for firmware. */
143 #define PSP_FOOTER_BYTES 0x100
144
145 /**
146  * DOC: overview
147  *
148  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
149  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
150  * requests into DC requests, and DC responses into DRM responses.
151  *
152  * The root control structure is &struct amdgpu_display_manager.
153  */
154
155 /* basic init/fini API */
156 static int amdgpu_dm_init(struct amdgpu_device *adev);
157 static void amdgpu_dm_fini(struct amdgpu_device *adev);
158 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
159
160 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
161 {
162         switch (link->dpcd_caps.dongle_type) {
163         case DISPLAY_DONGLE_NONE:
164                 return DRM_MODE_SUBCONNECTOR_Native;
165         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
166                 return DRM_MODE_SUBCONNECTOR_VGA;
167         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
168         case DISPLAY_DONGLE_DP_DVI_DONGLE:
169                 return DRM_MODE_SUBCONNECTOR_DVID;
170         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
171         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
172                 return DRM_MODE_SUBCONNECTOR_HDMIA;
173         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
174         default:
175                 return DRM_MODE_SUBCONNECTOR_Unknown;
176         }
177 }
178
179 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
180 {
181         struct dc_link *link = aconnector->dc_link;
182         struct drm_connector *connector = &aconnector->base;
183         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
184
185         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
186                 return;
187
188         if (aconnector->dc_sink)
189                 subconnector = get_subconnector_type(link);
190
191         drm_object_property_set_value(&connector->base,
192                         connector->dev->mode_config.dp_subconnector_property,
193                         subconnector);
194 }
195
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

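/*
 * get_crtc_by_otg_inst() - Map an OTG (output timing generator) instance back
 * to its amdgpu_crtc. Used by the IRQ handlers below to find the CRTC that
 * raised an interrupt. Warns and falls back to the first CRTC if the instance
 * is -1; returns NULL if no CRTC matches.
 */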
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

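/*
 * dm_crtc_handle_vblank() - Forward a vblank to the DRM core and send any
 * pending completion event for commits that did not program a page flip
 * (e.g. cursor-only updates), since no pflip interrupt fires for those.
 */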
static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
        struct drm_crtc *crtc = &acrtc->base;
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        drm_crtc_handle_vblank(crtc);

        spin_lock_irqsave(&dev->event_lock, flags);

        /* Send completion event for cursor-only commits */
        if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                drm_crtc_send_vblank_event(crtc, acrtc->event);
                drm_crtc_vblank_put(crtc);
                acrtc->event = NULL;
        }

        spin_unlock_irqrestore(&dev->event_lock, flags);
}

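/*
 * dm_vupdate_high_irq() - Handle the VUPDATE interrupt, which fires after the
 * end of the front-porch. Tracks the measured refresh rate and, in VRR mode,
 * performs the core vblank handling (deferred from dm_crtc_high_irq()) plus
 * below-the-range (BTR) processing on pre-DCE12 ASICs.
 */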
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        dm_crtc_handle_vblank(acrtc);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                dm_crtc_handle_vblank(acrtc);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        uint8_t link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        if (notify->link_index > adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing should be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
                adev->dm.dmub_callback[type] = callback;
                adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
        } else
                return false;

        return true;
}

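/*
 * Example usage (as done in amdgpu_dm_init() later in this file): HPD
 * notifications are registered for offload to the DMUB interrupt handling
 * thread with
 *
 *   register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                 dmub_hpd_callback, true);
 */

/*
 * dm_handle_hpd_work() - Deferred-work wrapper that dispatches an offloaded
 * DMUB HPD notification to the callback registered for its type, then frees
 * the work item and its notification copy.
 */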
static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by processing pending DMUB notifications
 * and DMCUB trace log entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
                irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type] == true) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                        entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else
                        break;

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                          int pipe, bool *enabled,
                                          unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                       struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

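/*
 * dm_dmub_hw_init() - Bring up the DMCUB (display microcontroller) hardware:
 * reset any previously running instance, copy the firmware's inst/const and
 * BSS/data sections plus the VBIOS image into their framebuffer windows,
 * clear the mailbox/trace-buffer/fw-state regions, then initialize the
 * hardware and wait for firmware auto-load. Returns 0 if DMUB is simply
 * unsupported on the ASIC.
 */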
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
                hw_params.dpia_supported = true;
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

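/*
 * dm_dmub_hw_resume() - Restore DMCUB on resume. If the hardware is already
 * initialized, only wait for firmware auto-load to finish; otherwise perform
 * the full dm_dmub_hw_init() sequence.
 */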
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}

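/*
 * mmhub_read_system_context() - Build the dc_phy_addr_space_config handed to
 * DC from GMC state: system aperture bounds (derived from the FB and AGP
 * apertures, with a Raven2 workaround that extends the aperture high address),
 * the GART page table start/end/base addresses, and the framebuffer
 * base/offset.
 */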
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increase system aperture high address (add 1)
                 * to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}

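/*
 * vblank_control_worker() - Deferred work that tracks how many CRTCs have
 * vblank interrupts enabled, allowing idle (MALL) optimizations only when
 * that count drops to zero, and enables/disables PSR to match the OS's
 * vblank requirements as described in the comment inside the function.
 */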
static void vblank_control_worker(struct work_struct *work)
{
        struct vblank_control_work *vblank_work =
                container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        /*
         * Control PSR based on vblank requirements from OS
         *
         * If panel supports PSR SU, there's no need to disable PSR when OS is
         * submitting fast atomic commits (we infer this by whether the OS
         * requests vblank events). Fast atomic commits will simply trigger a
         * full-frame-update (FFU); a specific case of selective-update (SU)
         * where the SU region is the full hactive*vactive region. See
         * fill_dc_dirty_rects().
         */
        if (vblank_work->stream && vblank_work->stream->link) {
                if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
                            vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
        }

        mutex_unlock(&dm->dc_lock);

        dc_stream_release(vblank_work->stream);

        kfree(vblank_work);
}

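/*
 * dm_handle_hpd_rx_offload_work() - Offloaded HPD RX IRQ processing:
 * re-detect the sink, then either run the DP automated test handler or, for
 * non-eDP links that report link loss, handle the link loss while holding
 * dc_lock.
 */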
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
        struct hpd_rx_irq_offload_work *offload_work;
        struct amdgpu_dm_connector *aconnector;
        struct dc_link *dc_link;
        struct amdgpu_device *adev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        unsigned long flags;

        offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
        aconnector = offload_work->offload_wq->aconnector;

        if (!aconnector) {
                DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
                goto skip;
        }

        adev = drm_to_adev(aconnector->base.dev);
        dc_link = aconnector->dc_link;

        mutex_lock(&aconnector->hpd_lock);
        if (!dc_link_detect_sink(dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");
        mutex_unlock(&aconnector->hpd_lock);

        if (new_connection_type == dc_connection_none)
                goto skip;

        if (amdgpu_in_reset(adev))
                goto skip;

        mutex_lock(&adev->dm.dc_lock);
        if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
                dc_link_dp_handle_automated_test(dc_link);
        else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                        hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                dc_link_dp_handle_link_loss(dc_link);
                spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
                offload_work->offload_wq->is_handling_link_loss = false;
                spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
        }
        mutex_unlock(&adev->dm.dc_lock);

skip:
        kfree(offload_work);
}

1369 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1370 {
1371         int max_caps = dc->caps.max_links;
1372         int i = 0;
1373         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1374
1375         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1376
1377         if (!hpd_rx_offload_wq)
1378                 return NULL;
1379
1381         for (i = 0; i < max_caps; i++) {
1382                 hpd_rx_offload_wq[i].wq =
1383                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1384
1385                 if (hpd_rx_offload_wq[i].wq == NULL) {
1386                         DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!\n");
                        /* Unwind: destroy the workqueues created so far. */
                        while (--i >= 0)
                                destroy_workqueue(hpd_rx_offload_wq[i].wq);
                        kfree(hpd_rx_offload_wq);
1387                         return NULL;
1388                 }
1389
1390                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1391         }
1392
1393         return hpd_rx_offload_wq;
1394 }
1395
1396 struct amdgpu_stutter_quirk {
1397         u16 chip_vendor;
1398         u16 chip_device;
1399         u16 subsys_vendor;
1400         u16 subsys_device;
1401         u8 revision;
1402 };
1403
1404 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1405         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1406         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1407         { 0, 0, 0, 0, 0 },
1408 };
1409
1410 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1411 {
1412         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1413
1414         while (p && p->chip_device != 0) {
1415                 if (pdev->vendor == p->chip_vendor &&
1416                     pdev->device == p->chip_device &&
1417                     pdev->subsystem_vendor == p->subsys_vendor &&
1418                     pdev->subsystem_device == p->subsys_device &&
1419                     pdev->revision == p->revision) {
1420                         return true;
1421                 }
1422                 ++p;
1423         }
1424         return false;
1425 }
1426
1427 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1428         {
1429                 .matches = {
1430                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1431                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1432                 },
1433         },
1434         {
1435                 .matches = {
1436                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1437                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1438                 },
1439         },
1440         {
1441                 .matches = {
1442                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1443                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1444                 },
1445         },
1446         {}
1447 };
1448
1449 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1450 {
1451         const struct dmi_system_id *dmi_id;
1452
1453         dm->aux_hpd_discon_quirk = false;
1454
1455         dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1456         if (dmi_id) {
1457                 dm->aux_hpd_discon_quirk = true;
1458                 DRM_INFO("aux_hpd_discon_quirk attached\n");
1459         }
1460 }
1461
1462 static int amdgpu_dm_init(struct amdgpu_device *adev)
1463 {
1464         struct dc_init_data init_data;
1465 #ifdef CONFIG_DRM_AMD_DC_HDCP
1466         struct dc_callback_init init_params;
1467 #endif
1468         int r;
1469
1470         adev->dm.ddev = adev_to_drm(adev);
1471         adev->dm.adev = adev;
1472
1473         /* Zero all the fields */
1474         memset(&init_data, 0, sizeof(init_data));
1475 #ifdef CONFIG_DRM_AMD_DC_HDCP
1476         memset(&init_params, 0, sizeof(init_params));
1477 #endif
1478
1479         mutex_init(&adev->dm.dc_lock);
1480         mutex_init(&adev->dm.audio_lock);
1481         spin_lock_init(&adev->dm.vblank_lock);
1482
1483         if (amdgpu_dm_irq_init(adev)) {
1484                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1485                 goto error;
1486         }
1487
1488         init_data.asic_id.chip_family = adev->family;
1489
1490         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1491         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1492         init_data.asic_id.chip_id = adev->pdev->device;
1493
1494         init_data.asic_id.vram_width = adev->gmc.vram_width;
1495         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1496         init_data.asic_id.atombios_base_address =
1497                 adev->mode_info.atom_context->bios;
1498
1499         init_data.driver = adev;
1500
1501         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1502
1503         if (!adev->dm.cgs_device) {
1504                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1505                 goto error;
1506         }
1507
1508         init_data.cgs_device = adev->dm.cgs_device;
1509
1510         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1511
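        /*
         * Decide whether the legacy DMCU must stay enabled based on the
         * DMCUB firmware version. Presumably only the early Renoir (DCN 2.1)
         * firmware builds listed below still rely on the DMCU, while newer
         * firmware subsumes its functionality.
         */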
1512         switch (adev->ip_versions[DCE_HWIP][0]) {
1513         case IP_VERSION(2, 1, 0):
1514                 switch (adev->dm.dmcub_fw_version) {
1515                 case 0: /* development */
1516                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1517                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1518                         init_data.flags.disable_dmcu = false;
1519                         break;
1520                 default:
1521                         init_data.flags.disable_dmcu = true;
1522                 }
1523                 break;
1524         case IP_VERSION(2, 0, 3):
1525                 init_data.flags.disable_dmcu = true;
1526                 break;
1527         default:
1528                 break;
1529         }
1530
1531         switch (adev->asic_type) {
1532         case CHIP_CARRIZO:
1533         case CHIP_STONEY:
1534                 init_data.flags.gpu_vm_support = true;
1535                 break;
1536         default:
1537                 switch (adev->ip_versions[DCE_HWIP][0]) {
1538                 case IP_VERSION(1, 0, 0):
1539                 case IP_VERSION(1, 0, 1):
1540                         /* enable S/G on PCO and RV2 */
1541                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1542                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1543                                 init_data.flags.gpu_vm_support = true;
1544                         break;
1545                 case IP_VERSION(2, 1, 0):
1546                 case IP_VERSION(3, 0, 1):
1547                 case IP_VERSION(3, 1, 2):
1548                 case IP_VERSION(3, 1, 3):
1549                 case IP_VERSION(3, 1, 5):
1550                 case IP_VERSION(3, 1, 6):
1551                         init_data.flags.gpu_vm_support = true;
1552                         break;
1553                 default:
1554                         break;
1555                 }
1556                 break;
1557         }
1558
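        /*
         * gpu_vm_support here selects scatter/gather display, i.e. scanout
         * from GART-mapped system memory; this reading is inferred from the
         * "enable S/G on PCO and RV2" comment above, not stated elsewhere in
         * this file.
         */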
1559         if (init_data.flags.gpu_vm_support)
1560                 adev->mode_info.gpu_vm_support = true;
1561
1562         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1563                 init_data.flags.fbc_support = true;
1564
1565         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1566                 init_data.flags.multi_mon_pp_mclk_switch = true;
1567
1568         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1569                 init_data.flags.disable_fractional_pwm = true;
1570
1571         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1572                 init_data.flags.edp_no_power_sequencing = true;
1573
1574         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1575                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1576         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1577                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1578
1579         init_data.flags.seamless_boot_edp_requested = false;
1580
1581         if (check_seamless_boot_capability(adev)) {
1582                 init_data.flags.seamless_boot_edp_requested = true;
1583                 init_data.flags.allow_seamless_boot_optimization = true;
1584                 DRM_INFO("Seamless boot condition check passed\n");
1585         }
1586
1587         init_data.flags.enable_mipi_converter_optimization = true;
1588
1589         init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1590         init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1591
1592         INIT_LIST_HEAD(&adev->dm.da_list);
1593
1594         retrieve_dmi_info(&adev->dm);
1595
1596         /* Display Core create. */
1597         adev->dm.dc = dc_create(&init_data);
1598
1599         if (adev->dm.dc) {
1600                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1601         } else {
1602                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1603                 goto error;
1604         }
1605
1606         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1607                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1608                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1609         }
1610
1611         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1612                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1613         if (dm_should_disable_stutter(adev->pdev))
1614                 adev->dm.dc->debug.disable_stutter = true;
1615
1616         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1617                 adev->dm.dc->debug.disable_stutter = true;
1618
1619         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1620                 adev->dm.dc->debug.disable_dsc = true;
1621                 adev->dm.dc->debug.disable_dsc_edp = true;
1622         }
1623
1624         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1625                 adev->dm.dc->debug.disable_clock_gate = true;
1626
1627         if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1628                 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1629
1630         r = dm_dmub_hw_init(adev);
1631         if (r) {
1632                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1633                 goto error;
1634         }
1635
1636         dc_hardware_init(adev->dm.dc);
1637
1638         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1639         if (!adev->dm.hpd_rx_offload_wq) {
1640                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1641                 goto error;
1642         }
1643
1644         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1645                 struct dc_phy_addr_space_config pa_config;
1646
1647                 mmhub_read_system_context(adev, &pa_config);
1648
1649                 /* Call the DC init_memory func */
1650                 dc_setup_system_context(adev->dm.dc, &pa_config);
1651         }
1652
1653         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1654         if (!adev->dm.freesync_module) {
1655                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1656         } else {
1657                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1658                                 adev->dm.freesync_module);
1659         }
1660
1661         amdgpu_dm_init_color_mod();
1662
1663         if (adev->dm.dc->caps.max_links > 0) {
1664                 adev->dm.vblank_control_workqueue =
1665                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1666                 if (!adev->dm.vblank_control_workqueue)
1667                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1668         }
1669
1670 #ifdef CONFIG_DRM_AMD_DC_HDCP
1671         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1672                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1673
1674                 if (!adev->dm.hdcp_workqueue)
1675                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1676                 else
1677                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1678
1679                 dc_init_callbacks(adev->dm.dc, &init_params);
1680         }
1681 #endif
1682 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1683         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1684 #endif
1685         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1686                 init_completion(&adev->dm.dmub_aux_transfer_done);
1687                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1688                 if (!adev->dm.dmub_notify) {
1689                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1690                         goto error;
1691                 }
1692
1693                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1694                 if (!adev->dm.delayed_hpd_wq) {
1695                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1696                         goto error;
1697                 }
1698
1699                 amdgpu_dm_outbox_init(adev);
1700                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1701                         dmub_aux_setconfig_callback, false)) {
1702                         DRM_ERROR("amdgpu: failed to register DMUB AUX callback\n");
1703                         goto error;
1704                 }
1705                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1706                         DRM_ERROR("amdgpu: failed to register DMUB HPD callback\n");
1707                         goto error;
1708                 }
1709                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1710                         DRM_ERROR("amdgpu: failed to register DMUB HPD IRQ callback\n");
1711                         goto error;
1712                 }
1713         }
1714
1715         if (amdgpu_dm_initialize_drm_device(adev)) {
1716                 DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1718                 goto error;
1719         }
1720
1721         /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1722          * It is expected that DMUB will resend any pending notifications at this point, for
1723          * example HPD from DPIA.
1724          */
1725         if (dc_is_dmub_outbox_supported(adev->dm.dc))
1726                 dc_enable_dmub_outbox(adev->dm.dc);
1727
1728         /* create fake encoders for MST */
1729         dm_dp_create_fake_mst_encoders(adev);
1730
1731         /* TODO: Add_display_info? */
1732
1733         /* TODO use dynamic cursor width */
1734         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1735         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1736
1737         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1738                 DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1740                 goto error;
1741         }
1742
1744         DRM_DEBUG_DRIVER("KMS initialized.\n");
1745
1746         return 0;
1747 error:
1748         amdgpu_dm_fini(adev);
1749
1750         return -EINVAL;
1751 }
1752
1753 static int amdgpu_dm_early_fini(void *handle)
1754 {
1755         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1756
1757         amdgpu_dm_audio_fini(adev);
1758
1759         return 0;
1760 }
1761
1762 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1763 {
1764         int i;
1765
1766         if (adev->dm.vblank_control_workqueue) {
1767                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1768                 adev->dm.vblank_control_workqueue = NULL;
1769         }
1770
1771         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1772                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1773         }
1774
1775         amdgpu_dm_destroy_drm_device(&adev->dm);
1776
1777 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1778         if (adev->dm.crc_rd_wrk) {
1779                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1780                 kfree(adev->dm.crc_rd_wrk);
1781                 adev->dm.crc_rd_wrk = NULL;
1782         }
1783 #endif
1784 #ifdef CONFIG_DRM_AMD_DC_HDCP
1785         if (adev->dm.hdcp_workqueue) {
1786                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1787                 adev->dm.hdcp_workqueue = NULL;
1788         }
1789
1790         if (adev->dm.dc)
1791                 dc_deinit_callbacks(adev->dm.dc);
1792 #endif
1793
1794         dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1795
1796         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1797                 kfree(adev->dm.dmub_notify);
1798                 adev->dm.dmub_notify = NULL;
1799                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1800                 adev->dm.delayed_hpd_wq = NULL;
1801         }
1802
1803         if (adev->dm.dmub_bo)
1804                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1805                                       &adev->dm.dmub_bo_gpu_addr,
1806                                       &adev->dm.dmub_bo_cpu_addr);
1807
1808         if (adev->dm.hpd_rx_offload_wq) {
1809                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1810                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1811                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1812                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1813                         }
1814                 }
1815
1816                 kfree(adev->dm.hpd_rx_offload_wq);
1817                 adev->dm.hpd_rx_offload_wq = NULL;
1818         }
1819
1820         /* DC Destroy TODO: Replace destroy DAL */
1821         if (adev->dm.dc)
1822                 dc_destroy(&adev->dm.dc);
1823         /*
1824          * TODO: pageflip, vblank interrupt
1825          *
1826          * amdgpu_dm_irq_fini(adev);
1827          */
1828
1829         if (adev->dm.cgs_device) {
1830                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1831                 adev->dm.cgs_device = NULL;
1832         }
1833         if (adev->dm.freesync_module) {
1834                 mod_freesync_destroy(adev->dm.freesync_module);
1835                 adev->dm.freesync_module = NULL;
1836         }
1837
1838         mutex_destroy(&adev->dm.audio_lock);
1839         mutex_destroy(&adev->dm.dc_lock);
1842 }
1843
1844 static int load_dmcu_fw(struct amdgpu_device *adev)
1845 {
1846         const char *fw_name_dmcu = NULL;
1847         int r;
1848         const struct dmcu_firmware_header_v1_0 *hdr;
1849
1850         switch (adev->asic_type) {
1851 #if defined(CONFIG_DRM_AMD_DC_SI)
1852         case CHIP_TAHITI:
1853         case CHIP_PITCAIRN:
1854         case CHIP_VERDE:
1855         case CHIP_OLAND:
1856 #endif
1857         case CHIP_BONAIRE:
1858         case CHIP_HAWAII:
1859         case CHIP_KAVERI:
1860         case CHIP_KABINI:
1861         case CHIP_MULLINS:
1862         case CHIP_TONGA:
1863         case CHIP_FIJI:
1864         case CHIP_CARRIZO:
1865         case CHIP_STONEY:
1866         case CHIP_POLARIS11:
1867         case CHIP_POLARIS10:
1868         case CHIP_POLARIS12:
1869         case CHIP_VEGAM:
1870         case CHIP_VEGA10:
1871         case CHIP_VEGA12:
1872         case CHIP_VEGA20:
1873                 return 0;
1874         case CHIP_NAVI12:
1875                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1876                 break;
1877         case CHIP_RAVEN:
1878                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1879                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1880                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1881                 else
1882                         return 0;
1884                 break;
1885         default:
1886                 switch (adev->ip_versions[DCE_HWIP][0]) {
1887                 case IP_VERSION(2, 0, 2):
1888                 case IP_VERSION(2, 0, 3):
1889                 case IP_VERSION(2, 0, 0):
1890                 case IP_VERSION(2, 1, 0):
1891                 case IP_VERSION(3, 0, 0):
1892                 case IP_VERSION(3, 0, 2):
1893                 case IP_VERSION(3, 0, 3):
1894                 case IP_VERSION(3, 0, 1):
1895                 case IP_VERSION(3, 1, 2):
1896                 case IP_VERSION(3, 1, 3):
1897                 case IP_VERSION(3, 1, 5):
1898                 case IP_VERSION(3, 1, 6):
1899                 case IP_VERSION(3, 2, 0):
1900                 case IP_VERSION(3, 2, 1):
1901                         return 0;
1902                 default:
1903                         break;
1904                 }
1905                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1906                 return -EINVAL;
1907         }
1908
1909         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1910                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1911                 return 0;
1912         }
1913
1914         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1915         if (r == -ENOENT) {
1916                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1917                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1918                 adev->dm.fw_dmcu = NULL;
1919                 return 0;
1920         }
1921         if (r) {
1922                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1923                         fw_name_dmcu);
1924                 return r;
1925         }
1926
1927         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1928         if (r) {
1929                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1930                         fw_name_dmcu);
1931                 release_firmware(adev->dm.fw_dmcu);
1932                 adev->dm.fw_dmcu = NULL;
1933                 return r;
1934         }
1935
1936         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1937         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1938         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1939         adev->firmware.fw_size +=
1940                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1941
1942         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1943         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1944         adev->firmware.fw_size +=
1945                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1946
1947         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1948
1949         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1950
1951         return 0;
1952 }
1953
1954 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1955 {
1956         struct amdgpu_device *adev = ctx;
1957
1958         return dm_read_reg(adev->dm.dc->ctx, address);
1959 }
1960
1961 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1962                                      uint32_t value)
1963 {
1964         struct amdgpu_device *adev = ctx;
1965
1966         dm_write_reg(adev->dm.dc->ctx, address, value);
1967 }
1968
1969 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1970 {
1971         struct dmub_srv_create_params create_params;
1972         struct dmub_srv_region_params region_params;
1973         struct dmub_srv_region_info region_info;
1974         struct dmub_srv_fb_params fb_params;
1975         struct dmub_srv_fb_info *fb_info;
1976         struct dmub_srv *dmub_srv;
1977         const struct dmcub_firmware_header_v1_0 *hdr;
1978         const char *fw_name_dmub;
1979         enum dmub_asic dmub_asic;
1980         enum dmub_status status;
1981         int r;
1982
1983         switch (adev->ip_versions[DCE_HWIP][0]) {
1984         case IP_VERSION(2, 1, 0):
1985                 dmub_asic = DMUB_ASIC_DCN21;
1986                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1987                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1988                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1989                 break;
1990         case IP_VERSION(3, 0, 0):
1991                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1992                         dmub_asic = DMUB_ASIC_DCN30;
1993                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1994                 } else {
1995                         dmub_asic = DMUB_ASIC_DCN30;
1996                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1997                 }
1998                 break;
1999         case IP_VERSION(3, 0, 1):
2000                 dmub_asic = DMUB_ASIC_DCN301;
2001                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
2002                 break;
2003         case IP_VERSION(3, 0, 2):
2004                 dmub_asic = DMUB_ASIC_DCN302;
2005                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
2006                 break;
2007         case IP_VERSION(3, 0, 3):
2008                 dmub_asic = DMUB_ASIC_DCN303;
2009                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
2010                 break;
2011         case IP_VERSION(3, 1, 2):
2012         case IP_VERSION(3, 1, 3):
2013                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2014                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
2015                 break;
2016         case IP_VERSION(3, 1, 4):
2017                 dmub_asic = DMUB_ASIC_DCN314;
2018                 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
2019                 break;
2020         case IP_VERSION(3, 1, 5):
2021                 dmub_asic = DMUB_ASIC_DCN315;
2022                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
2023                 break;
2024         case IP_VERSION(3, 1, 6):
2025                 dmub_asic = DMUB_ASIC_DCN316;
2026                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
2027                 break;
2028         case IP_VERSION(3, 2, 0):
2029                 dmub_asic = DMUB_ASIC_DCN32;
2030                 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
2031                 break;
2032         case IP_VERSION(3, 2, 1):
2033                 dmub_asic = DMUB_ASIC_DCN321;
2034                 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
2035                 break;
2036         default:
2037                 /* ASIC doesn't support DMUB. */
2038                 return 0;
2039         }
2040
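        /*
         * Failures to load or validate the DMUB firmware below return 0
         * rather than an error, apparently so that device probe can still
         * continue; the DRM_ERROR logs flag the problem regardless.
         */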
2041         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
2042         if (r) {
2043                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2044                 return 0;
2045         }
2046
2047         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2048         if (r) {
2049                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2050                 return 0;
2051         }
2052
2053         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2054         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2055
2056         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2057                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2058                         AMDGPU_UCODE_ID_DMCUB;
2059                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2060                         adev->dm.dmub_fw;
2061                 adev->firmware.fw_size +=
2062                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2063
2064                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2065                          adev->dm.dmcub_fw_version);
2066         }
2067
2069         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2070         dmub_srv = adev->dm.dmub_srv;
2071
2072         if (!dmub_srv) {
2073                 DRM_ERROR("Failed to allocate DMUB service!\n");
2074                 return -ENOMEM;
2075         }
2076
2077         memset(&create_params, 0, sizeof(create_params));
2078         create_params.user_ctx = adev;
2079         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2080         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2081         create_params.asic = dmub_asic;
2082
2083         /* Create the DMUB service. */
2084         status = dmub_srv_create(dmub_srv, &create_params);
2085         if (status != DMUB_STATUS_OK) {
2086                 DRM_ERROR("Error creating DMUB service: %d\n", status);
2087                 return -EINVAL;
2088         }
2089
2090         /* Calculate the size of all the regions for the DMUB service. */
2091         memset(&region_params, 0, sizeof(region_params));
2092
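        /*
         * The instruction-constant region of the firmware image is wrapped
         * in PSP signing metadata, so the header is skipped and both header
         * and footer are excluded from the region size below.
         */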
2093         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2094                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2095         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2096         region_params.vbios_size = adev->bios_size;
2097         region_params.fw_bss_data = region_params.bss_data_size ?
2098                 adev->dm.dmub_fw->data +
2099                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2100                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2101         region_params.fw_inst_const =
2102                 adev->dm.dmub_fw->data +
2103                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2104                 PSP_HEADER_BYTES;
2105
2106         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2107                                            &region_info);
2108
2109         if (status != DMUB_STATUS_OK) {
2110                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2111                 return -EINVAL;
2112         }
2113
2114         /*
2115          * Allocate a framebuffer based on the total size of all the regions.
2116          * TODO: Move this into GART.
2117          */
2118         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2119                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2120                                     &adev->dm.dmub_bo_gpu_addr,
2121                                     &adev->dm.dmub_bo_cpu_addr);
2122         if (r)
2123                 return r;
2124
2125         /* Rebase the regions on the framebuffer address. */
2126         memset(&fb_params, 0, sizeof(fb_params));
2127         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2128         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2129         fb_params.region_info = &region_info;
2130
2131         adev->dm.dmub_fb_info =
2132                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2133         fb_info = adev->dm.dmub_fb_info;
2134
2135         if (!fb_info) {
2136                 DRM_ERROR(
2137                         "Failed to allocate framebuffer info for DMUB service!\n");
2138                 return -ENOMEM;
2139         }
2140
2141         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2142         if (status != DMUB_STATUS_OK) {
2143                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2144                 return -EINVAL;
2145         }
2146
2147         return 0;
2148 }
2149
2150 static int dm_sw_init(void *handle)
2151 {
2152         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2153         int r;
2154
2155         r = dm_dmub_sw_init(adev);
2156         if (r)
2157                 return r;
2158
2159         return load_dmcu_fw(adev);
2160 }
2161
2162 static int dm_sw_fini(void *handle)
2163 {
2164         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2165
2166         kfree(adev->dm.dmub_fb_info);
2167         adev->dm.dmub_fb_info = NULL;
2168
2169         if (adev->dm.dmub_srv) {
2170                 dmub_srv_destroy(adev->dm.dmub_srv);
2171                 adev->dm.dmub_srv = NULL;
2172         }
2173
2174         release_firmware(adev->dm.dmub_fw);
2175         adev->dm.dmub_fw = NULL;
2176
2177         release_firmware(adev->dm.fw_dmcu);
2178         adev->dm.fw_dmcu = NULL;
2179
2180         return 0;
2181 }
2182
2183 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2184 {
2185         struct amdgpu_dm_connector *aconnector;
2186         struct drm_connector *connector;
2187         struct drm_connector_list_iter iter;
2188         int ret = 0;
2189
2190         drm_connector_list_iter_begin(dev, &iter);
2191         drm_for_each_connector_iter(connector, &iter) {
2192                 aconnector = to_amdgpu_dm_connector(connector);
2193                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2194                     aconnector->mst_mgr.aux) {
2195                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2196                                          aconnector,
2197                                          aconnector->base.base.id);
2198
2199                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2200                         if (ret < 0) {
2201                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2202                                 aconnector->dc_link->type =
2203                                         dc_connection_single;
2204                                 break;
2205                         }
2206                 }
2207         }
2208         drm_connector_list_iter_end(&iter);
2209
2210         return ret;
2211 }
2212
2213 static int dm_late_init(void *handle)
2214 {
2215         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2216
2217         struct dmcu_iram_parameters params;
2218         unsigned int linear_lut[16];
2219         int i;
2220         struct dmcu *dmcu = NULL;
2221
2222         dmcu = adev->dm.dc->res_pool->dmcu;
2223
2224         for (i = 0; i < 16; i++)
2225                 linear_lut[i] = 0xFFFF * i / 15;
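        /* An evenly spaced identity ramp: linear_lut[0] = 0x0000,
         * linear_lut[1] = 0x1111, ..., linear_lut[15] = 0xFFFF.
         */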
2226
2227         params.set = 0;
2228         params.backlight_ramping_override = false;
2229         params.backlight_ramping_start = 0xCCCC;
2230         params.backlight_ramping_reduction = 0xCCCCCCCC;
2231         params.backlight_lut_array_size = 16;
2232         params.backlight_lut_array = linear_lut;
2233
2234         /* Min backlight level after ABM reduction; don't allow below 1%:
2235          * 0xFFFF * 0.01 = 0x28F
2236          */
2237         params.min_abm_backlight = 0x28F;
2238         /* In the case where ABM is implemented on DMCUB, the DMCU object
2239          * will be NULL. ABM 2.4 and up are implemented on DMCUB.
2240          */
2242         if (dmcu) {
2243                 if (!dmcu_load_iram(dmcu, params))
2244                         return -EINVAL;
2245         } else if (adev->dm.dc->ctx->dmub_srv) {
2246                 struct dc_link *edp_links[MAX_NUM_EDP];
2247                 int edp_num;
2248
2249                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2250                 for (i = 0; i < edp_num; i++) {
2251                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2252                                 return -EINVAL;
2253                 }
2254         }
2255
2256         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2257 }
2258
2259 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2260 {
2261         struct amdgpu_dm_connector *aconnector;
2262         struct drm_connector *connector;
2263         struct drm_connector_list_iter iter;
2264         struct drm_dp_mst_topology_mgr *mgr;
2265         int ret;
2266         bool need_hotplug = false;
2267
2268         drm_connector_list_iter_begin(dev, &iter);
2269         drm_for_each_connector_iter(connector, &iter) {
2270                 aconnector = to_amdgpu_dm_connector(connector);
2271                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2272                     aconnector->mst_port)
2273                         continue;
2274
2275                 mgr = &aconnector->mst_mgr;
2276
2277                 if (suspend) {
2278                         drm_dp_mst_topology_mgr_suspend(mgr);
2279                 } else {
2280                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2281                         if (ret < 0) {
2282                                 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2283                                         aconnector->dc_link);
2284                                 need_hotplug = true;
2285                         }
2286                 }
2287         }
2288         drm_connector_list_iter_end(&iter);
2289
2290         if (need_hotplug)
2291                 drm_kms_helper_hotplug_event(dev);
2292 }
2293
2294 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2295 {
2296         int ret = 0;
2297
2298         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2299          * depends on the Windows driver dc implementation.
2300          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
2301          * settings should be passed to smu during boot up and on resume from S3.
2302          * Boot up: dc calculates the dcn watermark clock settings within
2303          * dc_create (dcn20_resource_construct),
2304          * then calls the pplib functions below to pass the settings to smu:
2305          * smu_set_watermarks_for_clock_ranges
2306          * smu_set_watermarks_table
2307          * navi10_set_watermarks_table
2308          * smu_write_watermarks_table
2309          *
2310          * For Renoir, the clock settings of the dcn watermarks are also fixed
2311          * values. dc has implemented a different flow for the Windows driver:
2312          * dc_hardware_init / dc_set_power_state
2313          * dcn10_init_hw
2314          * notify_wm_ranges
2315          * set_wm_ranges
2316          * -- Linux
2317          * smu_set_watermarks_for_clock_ranges
2318          * renoir_set_watermarks_table
2319          * smu_write_watermarks_table
2320          *
2321          * For Linux,
2322          * dc_hardware_init -> amdgpu_dm_init
2323          * dc_set_power_state --> dm_resume
2324          *
2325          * Therefore, this function applies to navi10/12/14 but not Renoir.
2326          */
2328         switch (adev->ip_versions[DCE_HWIP][0]) {
2329         case IP_VERSION(2, 0, 2):
2330         case IP_VERSION(2, 0, 0):
2331                 break;
2332         default:
2333                 return 0;
2334         }
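        /* The two DCE_HWIP versions matched above are the Navi10/12/14
         * parts referred to in the comment at the top of this function.
         */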
2335
2336         ret = amdgpu_dpm_write_watermarks_table(adev);
2337         if (ret) {
2338                 DRM_ERROR("Failed to update WMTABLE!\n");
2339                 return ret;
2340         }
2341
2342         return 0;
2343 }
2344
2345 /**
2346  * dm_hw_init() - Initialize DC device
2347  * @handle: The base driver device containing the amdgpu_dm device.
2348  *
2349  * Initialize the &struct amdgpu_display_manager device. This involves calling
2350  * the initializers of each DM component, then populating the struct with them.
2351  *
2352  * Although the function implies hardware initialization, both hardware and
2353  * software are initialized here. Splitting them out to their relevant init
2354  * hooks is a future TODO item.
2355  *
2356  * Some notable things that are initialized here:
2357  *
2358  * - Display Core, both software and hardware
2359  * - DC modules that we need (freesync and color management)
2360  * - DRM software states
2361  * - Interrupt sources and handlers
2362  * - Vblank support
2363  * - Debug FS entries, if enabled
2364  */
2365 static int dm_hw_init(void *handle)
2366 {
2367         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

2368         /* Create DAL display manager */
2369         r = amdgpu_dm_init(adev);
        if (r)
                return r;
2370         amdgpu_dm_hpd_init(adev);
2371
2372         return 0;
2373 }
2374
2375 /**
2376  * dm_hw_fini() - Teardown DC device
2377  * @handle: The base driver device containing the amdgpu_dm device.
2378  *
2379  * Teardown components within &struct amdgpu_display_manager that require
2380  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2381  * were loaded. Also flush IRQ workqueues and disable them.
2382  */
2383 static int dm_hw_fini(void *handle)
2384 {
2385         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2386
2387         amdgpu_dm_hpd_fini(adev);
2388
2389         amdgpu_dm_irq_fini(adev);
2390         amdgpu_dm_fini(adev);
2391         return 0;
2392 }
2393
2395 static int dm_enable_vblank(struct drm_crtc *crtc);
2396 static void dm_disable_vblank(struct drm_crtc *crtc);
2397
2398 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2399                                  struct dc_state *state, bool enable)
2400 {
2401         enum dc_irq_source irq_source;
2402         struct amdgpu_crtc *acrtc;
2403         int rc = -EBUSY;
2404         int i = 0;
2405
2406         for (i = 0; i < state->stream_count; i++) {
2407                 acrtc = get_crtc_by_otg_inst(
2408                                 adev, state->stream_status[i].primary_otg_inst);
2409
2410                 if (acrtc && state->stream_status[i].plane_count != 0) {
2411                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2412                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2413                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2414                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2415                         if (rc)
2416                                 DRM_WARN("Failed to %s pflip interrupts\n",
2417                                          enable ? "enable" : "disable");
2418
2419                         if (enable) {
2420                                 rc = dm_enable_vblank(&acrtc->base);
2421                                 if (rc)
2422                                         DRM_WARN("Failed to enable vblank interrupts\n");
2423                         } else {
2424                                 dm_disable_vblank(&acrtc->base);
2425                         }
2427                 }
2428         }
2430 }
2431
2432 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2433 {
2434         struct dc_state *context = NULL;
2435         enum dc_status res = DC_ERROR_UNEXPECTED;
2436         int i;
2437         struct dc_stream_state *del_streams[MAX_PIPES];
2438         int del_streams_count = 0;
2439
2440         memset(del_streams, 0, sizeof(del_streams));
2441
2442         context = dc_create_state(dc);
2443         if (context == NULL)
2444                 goto context_alloc_fail;
2445
2446         dc_resource_state_copy_construct_current(dc, context);
2447
2448         /* First remove from context all streams */
2449         for (i = 0; i < context->stream_count; i++) {
2450                 struct dc_stream_state *stream = context->streams[i];
2451
2452                 del_streams[del_streams_count++] = stream;
2453         }
2454
2455         /* Remove all planes for removed streams and then remove the streams */
2456         for (i = 0; i < del_streams_count; i++) {
2457                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2458                         res = DC_FAIL_DETACH_SURFACES;
2459                         goto fail;
2460                 }
2461
2462                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2463                 if (res != DC_OK)
2464                         goto fail;
2465         }
2466
2467         res = dc_commit_state(dc, context);
2468
2469 fail:
2470         dc_release_state(context);
2471
2472 context_alloc_fail:
2473         return res;
2474 }
2475
2476 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2477 {
2478         int i;
2479
2480         if (dm->hpd_rx_offload_wq) {
2481                 for (i = 0; i < dm->dc->caps.max_links; i++)
2482                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2483         }
2484 }
2485
2486 static int dm_suspend(void *handle)
2487 {
2488         struct amdgpu_device *adev = handle;
2489         struct amdgpu_display_manager *dm = &adev->dm;
2490         int ret = 0;
2491
2492         if (amdgpu_in_reset(adev)) {
2493                 mutex_lock(&dm->dc_lock);
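                /*
                 * Note: dc_lock is deliberately not released on this path;
                 * the matching mutex_unlock() happens in the
                 * amdgpu_in_reset() branch of dm_resume().
                 */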
2494
2495                 dc_allow_idle_optimizations(adev->dm.dc, false);
2496
2497                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2498
2499                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2500
2501                 amdgpu_dm_commit_zero_streams(dm->dc);
2502
2503                 amdgpu_dm_irq_suspend(adev);
2504
2505                 hpd_rx_irq_work_suspend(dm);
2506
2507                 return ret;
2508         }
2509
2510         WARN_ON(adev->dm.cached_state);
2511         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2512
2513         s3_handle_mst(adev_to_drm(adev), true);
2514
2515         amdgpu_dm_irq_suspend(adev);
2516
2517         hpd_rx_irq_work_suspend(dm);
2518
2519         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2520
2521         return 0;
2522 }
2523
2524 struct amdgpu_dm_connector *
2525 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2526                                              struct drm_crtc *crtc)
2527 {
2528         uint32_t i;
2529         struct drm_connector_state *new_con_state;
2530         struct drm_connector *connector;
2531         struct drm_crtc *crtc_from_state;
2532
2533         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2534                 crtc_from_state = new_con_state->crtc;
2535
2536                 if (crtc_from_state == crtc)
2537                         return to_amdgpu_dm_connector(connector);
2538         }
2539
2540         return NULL;
2541 }
2542
2543 static void emulated_link_detect(struct dc_link *link)
2544 {
2545         struct dc_sink_init_data sink_init_data = { 0 };
2546         struct display_sink_capability sink_caps = { 0 };
2547         enum dc_edid_status edid_status;
2548         struct dc_context *dc_ctx = link->ctx;
2549         struct dc_sink *sink = NULL;
2550         struct dc_sink *prev_sink = NULL;
2551
2552         link->type = dc_connection_none;
2553         prev_sink = link->local_sink;
2554
2555         if (prev_sink)
2556                 dc_sink_release(prev_sink);
2557
2558         switch (link->connector_signal) {
2559         case SIGNAL_TYPE_HDMI_TYPE_A: {
2560                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2561                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2562                 break;
2563         }
2564
2565         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2566                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2567                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2568                 break;
2569         }
2570
2571         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2572                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2573                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2574                 break;
2575         }
2576
2577         case SIGNAL_TYPE_LVDS: {
2578                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2579                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2580                 break;
2581         }
2582
2583         case SIGNAL_TYPE_EDP: {
2584                 sink_caps.transaction_type =
2585                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2586                 sink_caps.signal = SIGNAL_TYPE_EDP;
2587                 break;
2588         }
2589
2590         case SIGNAL_TYPE_DISPLAY_PORT: {
2591                 sink_caps.transaction_type =
2592                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2593                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2594                 break;
2595         }
2596
2597         default:
2598                 DC_ERROR("Invalid connector type! signal:%d\n",
2599                         link->connector_signal);
2600                 return;
2601         }
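        /*
         * Note that the DISPLAY_PORT case above reports SIGNAL_TYPE_VIRTUAL
         * rather than a DP signal type, presumably because an emulated sink
         * cannot complete DP link training.
         */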
2602
2603         sink_init_data.link = link;
2604         sink_init_data.sink_signal = sink_caps.signal;
2605
2606         sink = dc_sink_create(&sink_init_data);
2607         if (!sink) {
2608                 DC_ERROR("Failed to create sink!\n");
2609                 return;
2610         }
2611
2612         /* dc_sink_create returns a new reference */
2613         link->local_sink = sink;
2614
2615         edid_status = dm_helpers_read_local_edid(
2616                         link->ctx,
2617                         link,
2618                         sink);
2619
2620         if (edid_status != EDID_OK)
2621                 DC_ERROR("Failed to read EDID\n");
2623 }
2624
2625 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2626                                      struct amdgpu_display_manager *dm)
2627 {
2628         struct {
2629                 struct dc_surface_update surface_updates[MAX_SURFACES];
2630                 struct dc_plane_info plane_infos[MAX_SURFACES];
2631                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2632                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2633                 struct dc_stream_update stream_update;
2634         } *bundle;
2635         int k, m;
2636
2637         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2638
2639         if (!bundle) {
2640                 dm_error("Failed to allocate update bundle\n");
2641                 goto cleanup;
2642         }
2643
2644         for (k = 0; k < dc_state->stream_count; k++) {
2645                 bundle->stream_update.stream = dc_state->streams[k];
2646
2647                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2648                         bundle->surface_updates[m].surface =
2649                                 dc_state->stream_status[k].plane_states[m];
2650                         bundle->surface_updates[m].surface->force_full_update =
2651                                 true;
2652                 }
2653                 dc_commit_updates_for_stream(
2654                         dm->dc, bundle->surface_updates,
2655                         dc_state->stream_status[k].plane_count,
2656                         dc_state->streams[k], &bundle->stream_update, dc_state);
2657         }
2658
2659 cleanup:
2660         kfree(bundle);
2663 }
2664
2665 static int dm_resume(void *handle)
2666 {
2667         struct amdgpu_device *adev = handle;
2668         struct drm_device *ddev = adev_to_drm(adev);
2669         struct amdgpu_display_manager *dm = &adev->dm;
2670         struct amdgpu_dm_connector *aconnector;
2671         struct drm_connector *connector;
2672         struct drm_connector_list_iter iter;
2673         struct drm_crtc *crtc;
2674         struct drm_crtc_state *new_crtc_state;
2675         struct dm_crtc_state *dm_new_crtc_state;
2676         struct drm_plane *plane;
2677         struct drm_plane_state *new_plane_state;
2678         struct dm_plane_state *dm_new_plane_state;
2679         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2680         enum dc_connection_type new_connection_type = dc_connection_none;
2681         struct dc_state *dc_state;
2682         int i, r, j;
2683
2684         if (amdgpu_in_reset(adev)) {
2685                 dc_state = dm->cached_dc_state;
2686
2687                 /*
2688                  * The dc->current_state is backed up into dm->cached_dc_state
2689                  * before we commit 0 streams.
2690                  *
2691                  * DC will clear link encoder assignments on the real state
2692                  * but the changes won't propagate over to the copy we made
2693                  * before the 0 streams commit.
2694                  *
2695                  * DC expects that link encoder assignments are *not* valid
2696                  * when committing a state, so as a workaround we can copy
2697                  * them from the current state.
2698                  *
2699                  * We lose the previous assignments, but we had already
2700                  * committed 0 streams anyway.
2701                  */
2702                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2703
2704                 r = dm_dmub_hw_init(adev);
2705                 if (r)
2706                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2707
2708                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2709                 dc_resume(dm->dc);
2710
2711                 amdgpu_dm_irq_resume_early(adev);
2712
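                /*
                 * Force a full reprogramming pass on the cached state: mark
                 * every stream as mode-changed and dirty all plane update
                 * flags.
                 */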
2713                 for (i = 0; i < dc_state->stream_count; i++) {
2714                         dc_state->streams[i]->mode_changed = true;
2715                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2716                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2717                                         = 0xffffffff;
2718                         }
2719                 }
2720
2721                 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2722                         amdgpu_dm_outbox_init(adev);
2723                         dc_enable_dmub_outbox(adev->dm.dc);
2724                 }
2725
2726                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2727
2728                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2729
2730                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2731
2732                 dc_release_state(dm->cached_dc_state);
2733                 dm->cached_dc_state = NULL;
2734
2735                 amdgpu_dm_irq_resume_late(adev);
2736
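                /* Pairs with the mutex_lock() taken in dm_suspend() while in
                 * GPU reset.
                 */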
2737                 mutex_unlock(&dm->dc_lock);
2738
2739                 return 0;
2740         }
2741         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2742         dc_release_state(dm_state->context);
2743         dm_state->context = dc_create_state(dm->dc);
2744         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2745         dc_resource_state_construct(dm->dc, dm_state->context);
2746
2747         /* Before powering on DC we need to re-initialize DMUB. */
2748         dm_dmub_hw_resume(adev);
2749
2750         /* Re-enable outbox interrupts for DPIA. */
2751         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2752                 amdgpu_dm_outbox_init(adev);
2753                 dc_enable_dmub_outbox(adev->dm.dc);
2754         }
2755
2756         /* power on hardware */
2757         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2758
2759         /* program HPD filter */
2760         dc_resume(dm->dc);
2761
2762         /*
2763          * Enable HPD Rx IRQ early; this should be done before setting the
2764          * mode, as short pulse interrupts are used for MST.
2765          */
2766         amdgpu_dm_irq_resume_early(adev);
2767
2768         /* On resume we need to rewrite the MSTM control bits to enable MST */
2769         s3_handle_mst(ddev, false);
2770
2771         /* Do detection */
2772         drm_connector_list_iter_begin(ddev, &iter);
2773         drm_for_each_connector_iter(connector, &iter) {
2774                 aconnector = to_amdgpu_dm_connector(connector);
2775
2776                 /*
2777                  * This is the case when traversing through already created
2778                  * MST connectors; they should be skipped.
2779                  */
2780                 if (aconnector->dc_link &&
2781                     aconnector->dc_link->type == dc_connection_mst_branch)
2782                         continue;
2783
2784                 mutex_lock(&aconnector->hpd_lock);
2785                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2786                         DRM_ERROR("KMS: Failed to detect connector\n");
2787
2788                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2789                         emulated_link_detect(aconnector->dc_link);
2790                 } else {
2791                         mutex_lock(&dm->dc_lock);
2792                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2793                         mutex_unlock(&dm->dc_lock);
2794                 }
2795
2796                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2797                         aconnector->fake_enable = false;
2798
2799                 if (aconnector->dc_sink)
2800                         dc_sink_release(aconnector->dc_sink);
2801                 aconnector->dc_sink = NULL;
2802                 amdgpu_dm_update_connector_after_detect(aconnector);
2803                 mutex_unlock(&aconnector->hpd_lock);
2804         }
2805         drm_connector_list_iter_end(&iter);
2806
2807         /* Force mode set in atomic commit */
2808         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2809                 new_crtc_state->active_changed = true;
2810
2811         /*
2812          * atomic_check is expected to create the dc states. We need to release
2813          * them here, since they were duplicated as part of the suspend
2814          * procedure.
2815          */
2816         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2817                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2818                 if (dm_new_crtc_state->stream) {
2819                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2820                         dc_stream_release(dm_new_crtc_state->stream);
2821                         dm_new_crtc_state->stream = NULL;
2822                 }
2823         }
2824
2825         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2826                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2827                 if (dm_new_plane_state->dc_state) {
2828                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2829                         dc_plane_state_release(dm_new_plane_state->dc_state);
2830                         dm_new_plane_state->dc_state = NULL;
2831                 }
2832         }
2833
2834         drm_atomic_helper_resume(ddev, dm->cached_state);
2835
2836         dm->cached_state = NULL;
2837
2838         amdgpu_dm_irq_resume_late(adev);
2839
2840         amdgpu_dm_smu_write_watermarks_table(adev);
2841
2842         return 0;
2843 }
2844
2845 /**
2846  * DOC: DM Lifecycle
2847  *
2848  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2849  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2850  * the base driver's device list to be initialized and torn down accordingly.
2851  *
2852  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2853  */
2854
2855 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2856         .name = "dm",
2857         .early_init = dm_early_init,
2858         .late_init = dm_late_init,
2859         .sw_init = dm_sw_init,
2860         .sw_fini = dm_sw_fini,
2861         .early_fini = amdgpu_dm_early_fini,
2862         .hw_init = dm_hw_init,
2863         .hw_fini = dm_hw_fini,
2864         .suspend = dm_suspend,
2865         .resume = dm_resume,
2866         .is_idle = dm_is_idle,
2867         .wait_for_idle = dm_wait_for_idle,
2868         .check_soft_reset = dm_check_soft_reset,
2869         .soft_reset = dm_soft_reset,
2870         .set_clockgating_state = dm_set_clockgating_state,
2871         .set_powergating_state = dm_set_powergating_state,
2872 };
2873
2874 const struct amdgpu_ip_block_version dm_ip_block =
2875 {
2876         .type = AMD_IP_BLOCK_TYPE_DCE,
2877         .major = 1,
2878         .minor = 0,
2879         .rev = 0,
2880         .funcs = &amdgpu_dm_funcs,
2881 };
2882
2883
2884 /**
2885  * DOC: atomic
2886  *
2887  * *WIP*
2888  */
2889
2890 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2891         .fb_create = amdgpu_display_user_framebuffer_create,
2892         .get_format_info = amd_get_format_info,
2893         .output_poll_changed = drm_fb_helper_output_poll_changed,
2894         .atomic_check = amdgpu_dm_atomic_check,
2895         .atomic_commit = drm_atomic_helper_commit,
2896 };
2897
2898 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2899         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2900 };
2901
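     /*
      * Derive the AUX backlight caps for an eDP link: decide whether AUX
      * brightness control is supported (OLED panels, subject to the
      * amdgpu_backlight module parameter) and convert the sink's HDR
      * metadata into the aux_min/max_input_signal luminance range.
      */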
2902 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2903 {
2904         u32 max_avg, min_cll, max, min, q, r;
2905         struct amdgpu_dm_backlight_caps *caps;
2906         struct amdgpu_display_manager *dm;
2907         struct drm_connector *conn_base;
2908         struct amdgpu_device *adev;
2909         struct dc_link *link = NULL;
2910         static const u8 pre_computed_values[] = {
2911                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2912                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2913         int i;
2914
2915         if (!aconnector || !aconnector->dc_link)
2916                 return;
2917
2918         link = aconnector->dc_link;
2919         if (link->connector_signal != SIGNAL_TYPE_EDP)
2920                 return;
2921
2922         conn_base = &aconnector->base;
2923         adev = drm_to_adev(conn_base->dev);
2924         dm = &adev->dm;
2925         for (i = 0; i < dm->num_of_edps; i++) {
2926                 if (link == dm->backlight_link[i])
2927                         break;
2928         }
2929         if (i >= dm->num_of_edps)
2930                 return;
2931         caps = &dm->backlight_caps[i];
2932         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2933         caps->aux_support = false;
2934         max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2935         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2936
2937         if (caps->ext_caps->bits.oled == 1 /*||
2938             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2939             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2940                 caps->aux_support = true;
2941
2942         if (amdgpu_backlight == 0)
2943                 caps->aux_support = false;
2944         else if (amdgpu_backlight == 1)
2945                 caps->aux_support = true;
2946
2947         /* From the specification (CTA-861-G), for calculating the maximum
2948          * luminance we need to use:
2949          *      Luminance = 50*2**(CV/32)
2950          * Where CV is a one-byte value.
2951          * Calculating this expression would require floating-point precision;
2952          * to avoid that complexity, we take advantage of the fact that CV is
2953          * divided by a constant. From Euclid's division algorithm, we know that
2954          * CV can be written as CV = 32*q + r. Next, we replace CV in the
2955          * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2956          * need to pre-compute the value of r/32. For pre-computing the values
2957          * we used the following Ruby line:
2958          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2959          * The results of the above expressions can be verified at
2960          * pre_computed_values.
2961          */
2962         q = max_avg >> 5;
2963         r = max_avg % 32;
2964         max = (1 << q) * pre_computed_values[r];
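             /* e.g. max_avg = 100: q = 3, r = 4, max = 8 * 55 = 440 (~50*2**(100/32)) */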
2965
2966         // min luminance: maxLum * (CV/255)^2 / 100, kept in integer math
2967         // as (max * min_cll^2) / (255^2 * 100) so the ratio doesn't round to 0
2968         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2969
2970         caps->aux_max_input_signal = max;
2971         caps->aux_min_input_signal = min;
2972 }
2973
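     /*
      * Synchronize the connector's software state with the result of a link
      * detection: swap in the newly detected dc_sink (or the emulated sink
      * for forced connectors), refresh the EDID property, FreeSync caps and
      * CEC state, and drop references to the previous sink.
      */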
2974 void amdgpu_dm_update_connector_after_detect(
2975                 struct amdgpu_dm_connector *aconnector)
2976 {
2977         struct drm_connector *connector = &aconnector->base;
2978         struct drm_device *dev = connector->dev;
2979         struct dc_sink *sink;
2980
2981         /* MST handled by drm_mst framework */
2982         if (aconnector->mst_mgr.mst_state)
2983                 return;
2984
2985         sink = aconnector->dc_link->local_sink;
2986         if (sink)
2987                 dc_sink_retain(sink);
2988
2989         /*
2990          * The EDID-managed connector gets its first update only in the mode_valid hook,
2991          * then the connector sink is set to a fake or physical sink depending on link status.
2992          * Skip if already done during boot.
2993          */
2994         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
2995                         aconnector->dc_em_sink) {
2997                 /*
2998                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2999                  * fake a stream, because connector->sink is set to NULL on resume.
3000                  */
3001                 mutex_lock(&dev->mode_config.mutex);
3002
3003                 if (sink) {
3004                         if (aconnector->dc_sink) {
3005                                 amdgpu_dm_update_freesync_caps(connector, NULL);
3006                                 /*
3007                                  * The retain and release below bump up the
3008                                  * sink's refcount because the link no longer points
3009                                  * to it after disconnect; otherwise the next CRTC-to-connector
3010                                  * reshuffle by the UMD would trigger an unwanted dc_sink release.
3011                                  */
3012                                 dc_sink_release(aconnector->dc_sink);
3013                         }
3014                         aconnector->dc_sink = sink;
3015                         dc_sink_retain(aconnector->dc_sink);
3016                         amdgpu_dm_update_freesync_caps(connector,
3017                                         aconnector->edid);
3018                 } else {
3019                         amdgpu_dm_update_freesync_caps(connector, NULL);
3020                         if (!aconnector->dc_sink) {
3021                                 aconnector->dc_sink = aconnector->dc_em_sink;
3022                                 dc_sink_retain(aconnector->dc_sink);
3023                         }
3024                 }
3025
3026                 mutex_unlock(&dev->mode_config.mutex);
3027
3028                 if (sink)
3029                         dc_sink_release(sink);
3030                 return;
3031         }
3032
3033         /*
3034          * TODO: temporary guard while looking for a proper fix.
3035          * If this sink is an MST sink, we should not do anything.
3036          */
3037         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3038                 dc_sink_release(sink);
3039                 return;
3040         }
3041
3042         if (aconnector->dc_sink == sink) {
3043                 /*
3044                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
3045                  * Do nothing!!
3046                  */
3047                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3048                                 aconnector->connector_id);
3049                 if (sink)
3050                         dc_sink_release(sink);
3051                 return;
3052         }
3053
3054         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3055                 aconnector->connector_id, aconnector->dc_sink, sink);
3056
3057         mutex_lock(&dev->mode_config.mutex);
3058
3059         /*
3060          * 1. Update status of the drm connector
3061          * 2. Send an event and let userspace tell us what to do
3062          */
3063         if (sink) {
3064                 /*
3065                  * TODO: check if we still need the S3 mode update workaround.
3066                  * If yes, put it here.
3067                  */
3068                 if (aconnector->dc_sink) {
3069                         amdgpu_dm_update_freesync_caps(connector, NULL);
3070                         dc_sink_release(aconnector->dc_sink);
3071                 }
3072
3073                 aconnector->dc_sink = sink;
3074                 dc_sink_retain(aconnector->dc_sink);
3075                 if (sink->dc_edid.length == 0) {
3076                         aconnector->edid = NULL;
3077                         if (aconnector->dc_link->aux_mode) {
3078                                 drm_dp_cec_unset_edid(
3079                                         &aconnector->dm_dp_aux.aux);
3080                         }
3081                 } else {
3082                         aconnector->edid =
3083                                 (struct edid *)sink->dc_edid.raw_edid;
3084
3085                         if (aconnector->dc_link->aux_mode)
3086                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3087                                                     aconnector->edid);
3088                 }
3089
3090                 drm_connector_update_edid_property(connector, aconnector->edid);
3091                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3092                 update_connector_ext_caps(aconnector);
3093         } else {
3094                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3095                 amdgpu_dm_update_freesync_caps(connector, NULL);
3096                 drm_connector_update_edid_property(connector, NULL);
3097                 aconnector->num_modes = 0;
3098                 dc_sink_release(aconnector->dc_sink);
3099                 aconnector->dc_sink = NULL;
3100                 aconnector->edid = NULL;
3101 #ifdef CONFIG_DRM_AMD_DC_HDCP
3102                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3103                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3104                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3105 #endif
3106         }
3107
3108         mutex_unlock(&dev->mode_config.mutex);
3109
3110         update_subconnector_property(aconnector);
3111
3112         if (sink)
3113                 dc_sink_release(sink);
3114 }
3115
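     /*
      * Common handling for a HPD long pulse on one connector: re-run link
      * detection under dc_lock, update the cached sink state and, unless
      * the connector is forced, send a hotplug uevent to userspace.
      */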
3116 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3117 {
3118         struct drm_connector *connector = &aconnector->base;
3119         struct drm_device *dev = connector->dev;
3120         enum dc_connection_type new_connection_type = dc_connection_none;
3121         struct amdgpu_device *adev = drm_to_adev(dev);
3122 #ifdef CONFIG_DRM_AMD_DC_HDCP
3123         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3124 #endif
3125         bool ret = false;
3126
3127         if (adev->dm.disable_hpd_irq)
3128                 return;
3129
3130         /*
3131          * In case of failure, or for MST, there is no need to update the connector
3132          * status or notify the OS, since (in the MST case) MST does this in its own context.
3133          */
3134         mutex_lock(&aconnector->hpd_lock);
3135
3136 #ifdef CONFIG_DRM_AMD_DC_HDCP
3137         if (adev->dm.hdcp_workqueue) {
3138                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3139                 dm_con_state->update_hdcp = true;
3140         }
3141 #endif
3142         if (aconnector->fake_enable)
3143                 aconnector->fake_enable = false;
3144
3145         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3146                 DRM_ERROR("KMS: Failed to detect connector\n");
3147
3148         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3149                 emulated_link_detect(aconnector->dc_link);
3150
3151                 drm_modeset_lock_all(dev);
3152                 dm_restore_drm_connector_state(dev, connector);
3153                 drm_modeset_unlock_all(dev);
3154
3155                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3156                         drm_kms_helper_connector_hotplug_event(connector);
3157         } else {
3158                 mutex_lock(&adev->dm.dc_lock);
3159                 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3160                 mutex_unlock(&adev->dm.dc_lock);
3161                 if (ret) {
3162                         amdgpu_dm_update_connector_after_detect(aconnector);
3163
3164                         drm_modeset_lock_all(dev);
3165                         dm_restore_drm_connector_state(dev, connector);
3166                         drm_modeset_unlock_all(dev);
3167
3168                         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3169                                 drm_kms_helper_connector_hotplug_event(connector);
3170                 }
3171         }
3172         mutex_unlock(&aconnector->hpd_lock);
3174 }
3175
3176 static void handle_hpd_irq(void *param)
3177 {
3178         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3179
3180         handle_hpd_irq_helper(aconnector);
3182 }
3183
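     /*
      * Drain pending MST sideband messages for this connector: read the
      * DPCD ESI (or sink count) registers, hand them to drm_dp_mst_hpd_irq(),
      * ACK each handled IRQ back to the sink, and repeat until no new IRQ
      * is flagged or max_process_count is hit.
      */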
3184 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3185 {
3186         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3187         uint8_t dret;
3188         bool new_irq_handled = false;
3189         int dpcd_addr;
3190         int dpcd_bytes_to_read;
3191
3192         const int max_process_count = 30;
3193         int process_count = 0;
3194
3195         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3196
3197         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3198                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3199                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3200                 dpcd_addr = DP_SINK_COUNT;
3201         } else {
3202                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3203                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3204                 dpcd_addr = DP_SINK_COUNT_ESI;
3205         }
3206
3207         dret = drm_dp_dpcd_read(
3208                 &aconnector->dm_dp_aux.aux,
3209                 dpcd_addr,
3210                 esi,
3211                 dpcd_bytes_to_read);
3212
3213         while (dret == dpcd_bytes_to_read &&
3214                 process_count < max_process_count) {
3215                 uint8_t retry;
3216                 dret = 0;
3217
3218                 process_count++;
3219
3220                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3221                 /* handle HPD short pulse irq */
3222                 if (aconnector->mst_mgr.mst_state)
3223                         drm_dp_mst_hpd_irq(
3224                                 &aconnector->mst_mgr,
3225                                 esi,
3226                                 &new_irq_handled);
3227
3228                 if (new_irq_handled) {
3229                         /* ACK at DPCD to notify downstream */
3230                         const int ack_dpcd_bytes_to_write =
3231                                 dpcd_bytes_to_read - 1;
3232
3233                         for (retry = 0; retry < 3; retry++) {
3234                                 uint8_t wret;
3235
3236                                 wret = drm_dp_dpcd_write(
3237                                         &aconnector->dm_dp_aux.aux,
3238                                         dpcd_addr + 1,
3239                                         &esi[1],
3240                                         ack_dpcd_bytes_to_write);
3241                                 if (wret == ack_dpcd_bytes_to_write)
3242                                         break;
3243                         }
3244
3245                         /* check if there is a new irq to be handled */
3246                         dret = drm_dp_dpcd_read(
3247                                 &aconnector->dm_dp_aux.aux,
3248                                 dpcd_addr,
3249                                 esi,
3250                                 dpcd_bytes_to_read);
3251
3252                         new_irq_handled = false;
3253                 } else {
3254                         break;
3255                 }
3256         }
3257
3258         if (process_count == max_process_count)
3259                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3260 }
3261
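     /*
      * Defer heavier HPD RX handling (automated test requests, link-loss
      * recovery) to the per-connector offload workqueue so the IRQ path
      * stays short: snapshot the IRQ data into a work item and queue it.
      */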
3262 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3263                                                         union hpd_irq_data hpd_irq_data)
3264 {
3265         struct hpd_rx_irq_offload_work *offload_work =
3266                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3267
3268         if (!offload_work) {
3269                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3270                 return;
3271         }
3272
3273         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3274         offload_work->data = hpd_irq_data;
3275         offload_work->offload_wq = offload_wq;
3276
3277         queue_work(offload_wq->wq, &offload_work->work);
3278         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3279 }
3280
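     /*
      * HPD RX (short pulse) handler: let DC decode the IRQ data, service
      * MST sideband messages inline, offload automated-test and link-loss
      * work, and fall back to a full detection pass when a downstream port
      * status change is reported on a non-MST connector.
      */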
3281 static void handle_hpd_rx_irq(void *param)
3282 {
3283         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3284         struct drm_connector *connector = &aconnector->base;
3285         struct drm_device *dev = connector->dev;
3286         struct dc_link *dc_link = aconnector->dc_link;
3287         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3288         bool result = false;
3289         enum dc_connection_type new_connection_type = dc_connection_none;
3290         struct amdgpu_device *adev = drm_to_adev(dev);
3291         union hpd_irq_data hpd_irq_data;
3292         bool link_loss = false;
3293         bool has_left_work = false;
3294         int idx = aconnector->base.index;
3295         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3296
3297         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3298
3299         if (adev->dm.disable_hpd_irq)
3300                 return;
3301
3302         /*
3303          * TODO: Temporarily add a mutex to protect the HPD interrupt from GPIO
3304          * conflicts; once the i2c helper is implemented, this mutex should be
3305          * retired.
3306          */
3307         mutex_lock(&aconnector->hpd_lock);
3308
3309         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3310                                                 &link_loss, true, &has_left_work);
3311
3312         if (!has_left_work)
3313                 goto out;
3314
3315         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3316                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3317                 goto out;
3318         }
3319
3320         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3321                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3322                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3323                         dm_handle_mst_sideband_msg(aconnector);
3324                         goto out;
3325                 }
3326
3327                 if (link_loss) {
3328                         bool skip = false;
3329
3330                         spin_lock(&offload_wq->offload_lock);
3331                         skip = offload_wq->is_handling_link_loss;
3332
3333                         if (!skip)
3334                                 offload_wq->is_handling_link_loss = true;
3335
3336                         spin_unlock(&offload_wq->offload_lock);
3337
3338                         if (!skip)
3339                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3340
3341                         goto out;
3342                 }
3343         }
3344
3345 out:
3346         if (result && !is_mst_root_connector) {
3347                 /* Downstream Port status changed. */
3348                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3349                         DRM_ERROR("KMS: Failed to detect connector\n");
3350
3351                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3352                         emulated_link_detect(dc_link);
3353
3354                         if (aconnector->fake_enable)
3355                                 aconnector->fake_enable = false;
3356
3357                         amdgpu_dm_update_connector_after_detect(aconnector);
3358
3360                         drm_modeset_lock_all(dev);
3361                         dm_restore_drm_connector_state(dev, connector);
3362                         drm_modeset_unlock_all(dev);
3363
3364                         drm_kms_helper_connector_hotplug_event(connector);
3365                 } else {
3366                         bool ret = false;
3367
3368                         mutex_lock(&adev->dm.dc_lock);
3369                         ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3370                         mutex_unlock(&adev->dm.dc_lock);
3371
3372                         if (ret) {
3373                                 if (aconnector->fake_enable)
3374                                         aconnector->fake_enable = false;
3375
3376                                 amdgpu_dm_update_connector_after_detect(aconnector);
3377
3378                                 drm_modeset_lock_all(dev);
3379                                 dm_restore_drm_connector_state(dev, connector);
3380                                 drm_modeset_unlock_all(dev);
3381
3382                                 drm_kms_helper_connector_hotplug_event(connector);
3383                         }
3384                 }
3385         }
3386 #ifdef CONFIG_DRM_AMD_DC_HDCP
3387         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3388                 if (adev->dm.hdcp_workqueue)
3389                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3390         }
3391 #endif
3392
3393         if (dc_link->type != dc_connection_mst_branch)
3394                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3395
3396         mutex_unlock(&aconnector->hpd_lock);
3397 }
3398
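     /*
      * Walk the connector list and hook up the DC HPD and HPD RX interrupt
      * sources to handle_hpd_irq()/handle_hpd_rx_irq(), registering in low
      * IRQ context since both handlers may sleep.
      */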
3399 static void register_hpd_handlers(struct amdgpu_device *adev)
3400 {
3401         struct drm_device *dev = adev_to_drm(adev);
3402         struct drm_connector *connector;
3403         struct amdgpu_dm_connector *aconnector;
3404         const struct dc_link *dc_link;
3405         struct dc_interrupt_params int_params = {0};
3406
3407         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3408         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3409
3410         list_for_each_entry(connector,
3411                         &dev->mode_config.connector_list, head) {
3412
3413                 aconnector = to_amdgpu_dm_connector(connector);
3414                 dc_link = aconnector->dc_link;
3415
3416                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3417                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3418                         int_params.irq_source = dc_link->irq_source_hpd;
3419
3420                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3421                                         handle_hpd_irq,
3422                                         (void *) aconnector);
3423                 }
3424
3425                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3427                         /* Also register for DP short pulse (hpd_rx). */
3428                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3429                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3430
3431                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3432                                         handle_hpd_rx_irq,
3433                                         (void *) aconnector);
3434
3435                         if (adev->dm.hpd_rx_offload_wq)
3436                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3437                                         aconnector;
3438                 }
3439         }
3440 }
3441
3442 #if defined(CONFIG_DRM_AMD_DC_SI)
3443 /* Register IRQ sources and initialize IRQ callbacks */
3444 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3445 {
3446         struct dc *dc = adev->dm.dc;
3447         struct common_irq_params *c_irq_params;
3448         struct dc_interrupt_params int_params = {0};
3449         int r;
3450         int i;
3451         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3452
3453         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3454         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3455
3456         /*
3457          * Actions of amdgpu_irq_add_id():
3458          * 1. Register a set() function with base driver.
3459          *    Base driver will call set() function to enable/disable an
3460          *    interrupt in DC hardware.
3461          * 2. Register amdgpu_dm_irq_handler().
3462          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3463          *    coming from DC hardware.
3464          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3465          *    for acknowledging and handling. */
3466
3467         /* Use VBLANK interrupt */
3468         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3469                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3470                 if (r) {
3471                         DRM_ERROR("Failed to add crtc irq id!\n");
3472                         return r;
3473                 }
3474
3475                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3476                 int_params.irq_source =
3477                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3478
3479                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3480
3481                 c_irq_params->adev = adev;
3482                 c_irq_params->irq_src = int_params.irq_source;
3483
3484                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3485                                 dm_crtc_high_irq, c_irq_params);
3486         }
3487
3488         /* Use GRPH_PFLIP interrupt */
3489         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3490                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3491                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3492                 if (r) {
3493                         DRM_ERROR("Failed to add page flip irq id!\n");
3494                         return r;
3495                 }
3496
3497                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3498                 int_params.irq_source =
3499                         dc_interrupt_to_irq_source(dc, i, 0);
3500
3501                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3502
3503                 c_irq_params->adev = adev;
3504                 c_irq_params->irq_src = int_params.irq_source;
3505
3506                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3507                                 dm_pflip_high_irq, c_irq_params);
3509         }
3510
3511         /* HPD */
3512         r = amdgpu_irq_add_id(adev, client_id,
3513                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3514         if (r) {
3515                 DRM_ERROR("Failed to add hpd irq id!\n");
3516                 return r;
3517         }
3518
3519         register_hpd_handlers(adev);
3520
3521         return 0;
3522 }
3523 #endif
3524
3525 /* Register IRQ sources and initialize IRQ callbacks */
3526 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3527 {
3528         struct dc *dc = adev->dm.dc;
3529         struct common_irq_params *c_irq_params;
3530         struct dc_interrupt_params int_params = {0};
3531         int r;
3532         int i;
3533         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3534
3535         if (adev->family >= AMDGPU_FAMILY_AI)
3536                 client_id = SOC15_IH_CLIENTID_DCE;
3537
3538         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3539         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3540
3541         /*
3542          * Actions of amdgpu_irq_add_id():
3543          * 1. Register a set() function with base driver.
3544          *    Base driver will call set() function to enable/disable an
3545          *    interrupt in DC hardware.
3546          * 2. Register amdgpu_dm_irq_handler().
3547          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3548          *    coming from DC hardware.
3549          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3550          *    for acknowledging and handling. */
3551
3552         /* Use VBLANK interrupt */
3553         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3554                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3555                 if (r) {
3556                         DRM_ERROR("Failed to add crtc irq id!\n");
3557                         return r;
3558                 }
3559
3560                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3561                 int_params.irq_source =
3562                         dc_interrupt_to_irq_source(dc, i, 0);
3563
3564                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3565
3566                 c_irq_params->adev = adev;
3567                 c_irq_params->irq_src = int_params.irq_source;
3568
3569                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3570                                 dm_crtc_high_irq, c_irq_params);
3571         }
3572
3573         /* Use VUPDATE interrupt */
3574         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3575                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3576                 if (r) {
3577                         DRM_ERROR("Failed to add vupdate irq id!\n");
3578                         return r;
3579                 }
3580
3581                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3582                 int_params.irq_source =
3583                         dc_interrupt_to_irq_source(dc, i, 0);
3584
3585                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3586
3587                 c_irq_params->adev = adev;
3588                 c_irq_params->irq_src = int_params.irq_source;
3589
3590                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3591                                 dm_vupdate_high_irq, c_irq_params);
3592         }
3593
3594         /* Use GRPH_PFLIP interrupt */
3595         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3596                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3597                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3598                 if (r) {
3599                         DRM_ERROR("Failed to add page flip irq id!\n");
3600                         return r;
3601                 }
3602
3603                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3604                 int_params.irq_source =
3605                         dc_interrupt_to_irq_source(dc, i, 0);
3606
3607                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3608
3609                 c_irq_params->adev = adev;
3610                 c_irq_params->irq_src = int_params.irq_source;
3611
3612                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3613                                 dm_pflip_high_irq, c_irq_params);
3615         }
3616
3617         /* HPD */
3618         r = amdgpu_irq_add_id(adev, client_id,
3619                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3620         if (r) {
3621                 DRM_ERROR("Failed to add hpd irq id!\n");
3622                 return r;
3623         }
3624
3625         register_hpd_handlers(adev);
3626
3627         return 0;
3628 }
3629
3630 /* Register IRQ sources and initialize IRQ callbacks */
3631 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3632 {
3633         struct dc *dc = adev->dm.dc;
3634         struct common_irq_params *c_irq_params;
3635         struct dc_interrupt_params int_params = {0};
3636         int r;
3637         int i;
3638 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3639         static const unsigned int vrtl_int_srcid[] = {
3640                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3641                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3642                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3643                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3644                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3645                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3646         };
3647 #endif
3648
3649         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3650         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3651
3652         /*
3653          * Actions of amdgpu_irq_add_id():
3654          * 1. Register a set() function with base driver.
3655          *    Base driver will call set() function to enable/disable an
3656          *    interrupt in DC hardware.
3657          * 2. Register amdgpu_dm_irq_handler().
3658          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3659          *    coming from DC hardware.
3660          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3661          *    for acknowledging and handling.
3662          */
3663
3664         /* Use VSTARTUP interrupt */
3665         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3666                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3667                         i++) {
3668                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3669
3670                 if (r) {
3671                         DRM_ERROR("Failed to add crtc irq id!\n");
3672                         return r;
3673                 }
3674
3675                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3676                 int_params.irq_source =
3677                         dc_interrupt_to_irq_source(dc, i, 0);
3678
3679                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3680
3681                 c_irq_params->adev = adev;
3682                 c_irq_params->irq_src = int_params.irq_source;
3683
3684                 amdgpu_dm_irq_register_interrupt(
3685                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3686         }
3687
3688         /* Use otg vertical line interrupt */
3689 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3690         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3691                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3692                                 vrtl_int_srcid[i], &adev->vline0_irq);
3693
3694                 if (r) {
3695                         DRM_ERROR("Failed to add vline0 irq id!\n");
3696                         return r;
3697                 }
3698
3699                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3700                 int_params.irq_source =
3701                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3702
3703                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3704                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3705                         break;
3706                 }
3707
3708                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3709                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3710
3711                 c_irq_params->adev = adev;
3712                 c_irq_params->irq_src = int_params.irq_source;
3713
3714                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3715                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3716         }
3717 #endif
3718
3719         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3720          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3721          * to trigger at end of each vblank, regardless of state of the lock,
3722          * matching DCE behaviour.
3723          */
3724         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3725              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3726              i++) {
3727                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3728
3729                 if (r) {
3730                         DRM_ERROR("Failed to add vupdate irq id!\n");
3731                         return r;
3732                 }
3733
3734                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3735                 int_params.irq_source =
3736                         dc_interrupt_to_irq_source(dc, i, 0);
3737
3738                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3739
3740                 c_irq_params->adev = adev;
3741                 c_irq_params->irq_src = int_params.irq_source;
3742
3743                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3744                                 dm_vupdate_high_irq, c_irq_params);
3745         }
3746
3747         /* Use GRPH_PFLIP interrupt */
3748         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3749                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3750                         i++) {
3751                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3752                 if (r) {
3753                         DRM_ERROR("Failed to add page flip irq id!\n");
3754                         return r;
3755                 }
3756
3757                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3758                 int_params.irq_source =
3759                         dc_interrupt_to_irq_source(dc, i, 0);
3760
3761                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3762
3763                 c_irq_params->adev = adev;
3764                 c_irq_params->irq_src = int_params.irq_source;
3765
3766                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3767                                 dm_pflip_high_irq, c_irq_params);
3769         }
3770
3771         /* HPD */
3772         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3773                         &adev->hpd_irq);
3774         if (r) {
3775                 DRM_ERROR("Failed to add hpd irq id!\n");
3776                 return r;
3777         }
3778
3779         register_hpd_handlers(adev);
3780
3781         return 0;
3782 }

3783 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3784 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3785 {
3786         struct dc *dc = adev->dm.dc;
3787         struct common_irq_params *c_irq_params;
3788         struct dc_interrupt_params int_params = {0};
3789         int r, i;
3790
3791         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3792         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3793
3794         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3795                         &adev->dmub_outbox_irq);
3796         if (r) {
3797                 DRM_ERROR("Failed to add outbox irq id!\n");
3798                 return r;
3799         }
3800
3801         if (dc->ctx->dmub_srv) {
3802                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3803                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3804                 int_params.irq_source =
3805                 dc_interrupt_to_irq_source(dc, i, 0);
3806
3807                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3808
3809                 c_irq_params->adev = adev;
3810                 c_irq_params->irq_src = int_params.irq_source;
3811
3812                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3813                                 dm_dmub_outbox1_low_irq, c_irq_params);
3814         }
3815
3816         return 0;
3817 }
3818
3819 /*
3820  * Acquires the lock for the atomic state object and returns
3821  * the new atomic state.
3822  *
3823  * This should only be called during atomic check.
3824  */
3825 int dm_atomic_get_state(struct drm_atomic_state *state,
3826                         struct dm_atomic_state **dm_state)
3827 {
3828         struct drm_device *dev = state->dev;
3829         struct amdgpu_device *adev = drm_to_adev(dev);
3830         struct amdgpu_display_manager *dm = &adev->dm;
3831         struct drm_private_state *priv_state;
3832
3833         if (*dm_state)
3834                 return 0;
3835
3836         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3837         if (IS_ERR(priv_state))
3838                 return PTR_ERR(priv_state);
3839
3840         *dm_state = to_dm_atomic_state(priv_state);
3841
3842         return 0;
3843 }
3844
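     /*
      * Return the DM private state already attached to this atomic
      * transaction, or NULL if it was never acquired; unlike
      * dm_atomic_get_state() this never takes the private object lock.
      */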
3845 static struct dm_atomic_state *
3846 dm_atomic_get_new_state(struct drm_atomic_state *state)
3847 {
3848         struct drm_device *dev = state->dev;
3849         struct amdgpu_device *adev = drm_to_adev(dev);
3850         struct amdgpu_display_manager *dm = &adev->dm;
3851         struct drm_private_obj *obj;
3852         struct drm_private_state *new_obj_state;
3853         int i;
3854
3855         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3856                 if (obj->funcs == dm->atomic_obj.funcs)
3857                         return to_dm_atomic_state(new_obj_state);
3858         }
3859
3860         return NULL;
3861 }
3862
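     /*
      * Duplicate the DM private object state for a new atomic transaction,
      * deep-copying the current dc_state so atomic check can mutate it
      * without touching the committed state.
      */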
3863 static struct drm_private_state *
3864 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3865 {
3866         struct dm_atomic_state *old_state, *new_state;
3867
3868         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3869         if (!new_state)
3870                 return NULL;
3871
3872         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3873
3874         old_state = to_dm_atomic_state(obj->state);
3875
3876         if (old_state && old_state->context)
3877                 new_state->context = dc_copy_state(old_state->context);
3878
3879         if (!new_state->context) {
3880                 kfree(new_state);
3881                 return NULL;
3882         }
3883
3884         return &new_state->base;
3885 }
3886
3887 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3888                                     struct drm_private_state *state)
3889 {
3890         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3891
3892         if (dm_state && dm_state->context)
3893                 dc_release_state(dm_state->context);
3894
3895         kfree(dm_state);
3896 }
3897
3898 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3899         .atomic_duplicate_state = dm_atomic_duplicate_state,
3900         .atomic_destroy_state = dm_atomic_destroy_state,
3901 };
3902
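     /*
      * Initialize drm_mode_config for the device (size limits, atomic
      * helpers, the DM private object backed by a copy of the current
      * dc_state) and create the DRM properties and audio component
      * used by DM.
      */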
3903 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3904 {
3905         struct dm_atomic_state *state;
3906         int r;
3907
3908         adev->mode_info.mode_config_initialized = true;
3909
3910         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3911         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3912
3913         adev_to_drm(adev)->mode_config.max_width = 16384;
3914         adev_to_drm(adev)->mode_config.max_height = 16384;
3915
3916         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3917         /* disable prefer shadow for now due to hibernation issues */
3918         adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3919         /* indicates support for immediate flip */
3920         adev_to_drm(adev)->mode_config.async_page_flip = true;
3921
3922         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3923
3924         state = kzalloc(sizeof(*state), GFP_KERNEL);
3925         if (!state)
3926                 return -ENOMEM;
3927
3928         state->context = dc_create_state(adev->dm.dc);
3929         if (!state->context) {
3930                 kfree(state);
3931                 return -ENOMEM;
3932         }
3933
3934         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3935
3936         drm_atomic_private_obj_init(adev_to_drm(adev),
3937                                     &adev->dm.atomic_obj,
3938                                     &state->base,
3939                                     &dm_atomic_state_funcs);
3940
3941         r = amdgpu_display_modeset_create_props(adev);
3942         if (r) {
3943                 dc_release_state(state->context);
3944                 kfree(state);
3945                 return r;
3946         }
3947
3948         r = amdgpu_dm_audio_init(adev);
3949         if (r) {
3950                 dc_release_state(state->context);
3951                 kfree(state);
3952                 return r;
3953         }
3954
3955         return 0;
3956 }
3957
3958 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3959 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3960 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3961
3962 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3963                                             int bl_idx)
3964 {
3965 #if defined(CONFIG_ACPI)
3966         struct amdgpu_dm_backlight_caps caps;
3967
3968         memset(&caps, 0, sizeof(caps));
3969
3970         if (dm->backlight_caps[bl_idx].caps_valid)
3971                 return;
3972
3973         amdgpu_acpi_get_backlight_caps(&caps);
3974         if (caps.caps_valid) {
3975                 dm->backlight_caps[bl_idx].caps_valid = true;
3976                 if (caps.aux_support)
3977                         return;
3978                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3979                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3980         } else {
3981                 dm->backlight_caps[bl_idx].min_input_signal =
3982                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3983                 dm->backlight_caps[bl_idx].max_input_signal =
3984                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3985         }
3986 #else
3987         if (dm->backlight_caps[bl_idx].aux_support)
3988                 return;
3989
3990         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3991         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3992 #endif
3993 }
3994
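     /*
      * Fetch the backlight range in DC units: millinits for AUX-controlled
      * panels, a 16-bit PWM scale (0x101 * the 8-bit firmware limit)
      * otherwise. Returns 0 when no caps are available.
      */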
3995 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3996                                 unsigned *min, unsigned *max)
3997 {
3998         if (!caps)
3999                 return 0;
4000
4001         if (caps->aux_support) {
4002                 // Firmware limits are in nits, DC API wants millinits.
4003                 *max = 1000 * caps->aux_max_input_signal;
4004                 *min = 1000 * caps->aux_min_input_signal;
4005         } else {
4006                 // Firmware limits are 8-bit, PWM control is 16-bit.
4007                 *max = 0x101 * caps->max_input_signal;
4008                 *min = 0x101 * caps->min_input_signal;
4009         }
4010         return 1;
4011 }
4012
4013 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4014                                         uint32_t brightness)
4015 {
4016         unsigned min, max;
4017
4018         if (!get_brightness_range(caps, &min, &max))
4019                 return brightness;
4020
4021         // Rescale 0..255 to min..max
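             // e.g. with the default PWM caps (min_input_signal 12,
             // max_input_signal 255): min = 3084, max = 65535, and a user
             // brightness of 128 maps to 3084 + 31348 = 34432.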
4022         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4023                                        AMDGPU_MAX_BL_LEVEL);
4024 }
4025
4026 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4027                                       uint32_t brightness)
4028 {
4029         unsigned min, max;
4030
4031         if (!get_brightness_range(caps, &min, &max))
4032                 return brightness;
4033
4034         if (brightness < min)
4035                 return 0;
4036         // Rescale min..max to 0..255
4037         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4038                                  max - min);
4039 }
4040
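     /*
      * Apply a user brightness value (0..AMDGPU_MAX_BL_LEVEL) to one eDP
      * link: convert it to millinits for AUX-controlled panels or to the
      * 16-bit PWM scale otherwise, and record what was actually committed
      * in dm->actual_brightness.
      */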
4041 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4042                                          int bl_idx,
4043                                          u32 user_brightness)
4044 {
4045         struct amdgpu_dm_backlight_caps caps;
4046         struct dc_link *link;
4047         u32 brightness;
4048         bool rc;
4049
4050         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4051         caps = dm->backlight_caps[bl_idx];
4052
4053         dm->brightness[bl_idx] = user_brightness;
4054         /* update scratch register */
4055         if (bl_idx == 0)
4056                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4057         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4058         link = (struct dc_link *)dm->backlight_link[bl_idx];
4059
4060         /* Change brightness based on AUX property */
4061         if (caps.aux_support) {
4062                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4063                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4064                 if (!rc)
4065                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4066         } else {
4067                 rc = dc_link_set_backlight_level(link, brightness, 0);
4068                 if (!rc)
4069                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4070         }
4071
4072         if (rc)
4073                 dm->actual_brightness[bl_idx] = user_brightness;
4074 }
4075
4076 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4077 {
4078         struct amdgpu_display_manager *dm = bl_get_data(bd);
4079         int i;
4080
4081         for (i = 0; i < dm->num_of_edps; i++) {
4082                 if (bd == dm->backlight_dev[i])
4083                         break;
4084         }
4085         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4086                 i = 0;
4087         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4088
4089         return 0;
4090 }
4091
4092 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4093                                          int bl_idx)
4094 {
4095         struct amdgpu_dm_backlight_caps caps;
4096         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4097
4098         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4099         caps = dm->backlight_caps[bl_idx];
4100
4101         if (caps.aux_support) {
4102                 u32 avg, peak;
4103                 bool rc;
4104
4105                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4106                 if (!rc)
4107                         return dm->brightness[bl_idx];
4108                 return convert_brightness_to_user(&caps, avg);
4109         } else {
4110                 int ret = dc_link_get_backlight_level(link);
4111
4112                 if (ret == DC_ERROR_UNEXPECTED)
4113                         return dm->brightness[bl_idx];
4114                 return convert_brightness_to_user(&caps, ret);
4115         }
4116 }
4117
4118 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4119 {
4120         struct amdgpu_display_manager *dm = bl_get_data(bd);
4121         int i;
4122
4123         for (i = 0; i < dm->num_of_edps; i++) {
4124                 if (bd == dm->backlight_dev[i])
4125                         break;
4126         }
4127         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4128                 i = 0;
4129         return amdgpu_dm_backlight_get_level(dm, i);
4130 }
4131
4132 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4133         .options = BL_CORE_SUSPENDRESUME,
4134         .get_brightness = amdgpu_dm_backlight_get_brightness,
4135         .update_status  = amdgpu_dm_backlight_update_status,
4136 };
4137
4138 static void
4139 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4140 {
4141         char bl_name[16];
4142         struct backlight_properties props = { 0 };
4143
4144         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4145         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4146
4147         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4148         props.brightness = AMDGPU_MAX_BL_LEVEL;
4149         props.type = BACKLIGHT_RAW;
4150
4151         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4152                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4153
4154         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4155                                                                        adev_to_drm(dm->adev)->dev,
4156                                                                        dm,
4157                                                                        &amdgpu_dm_backlight_ops,
4158                                                                        &props);
4159
4160         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4161                 DRM_ERROR("DM: Backlight registration failed!\n");
4162         else
4163                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4164 }
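/*
 * The device registered above appears as /sys/class/backlight/amdgpu_blN,
 * so e.g. (path assumed for illustration)
 *   echo 128 > /sys/class/backlight/amdgpu_bl0/brightness
 * reaches amdgpu_dm_backlight_update_status() through amdgpu_dm_backlight_ops.
 */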
4165
4166 static int initialize_plane(struct amdgpu_display_manager *dm,
4167                             struct amdgpu_mode_info *mode_info, int plane_id,
4168                             enum drm_plane_type plane_type,
4169                             const struct dc_plane_cap *plane_cap)
4170 {
4171         struct drm_plane *plane;
4172         unsigned long possible_crtcs;
4173         int ret = 0;
4174
4175         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4176         if (!plane) {
4177                 DRM_ERROR("KMS: Failed to allocate plane\n");
4178                 return -ENOMEM;
4179         }
4180         plane->type = plane_type;
4181
4182         /*
4183          * HACK: IGT tests expect that the primary plane for a CRTC
4184          * can only have one possible CRTC. Only expose support for
4185          * any CRTC on planes that will not be used as a primary plane
4186          * for a CRTC, such as overlay or underlay planes.
4187          */
4188         possible_crtcs = 1 << plane_id;
4189         if (plane_id >= dm->dc->caps.max_streams)
4190                 possible_crtcs = 0xff;
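        /*
         * Example (values assumed): with max_streams == 4, primary plane 2
         * gets possible_crtcs == 1 << 2 == 0x4 (CRTC 2 only), while plane 5
         * is an overlay plane and gets 0xff (any of the first 8 CRTCs).
         */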
4191
4192         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4193
4194         if (ret) {
4195                 DRM_ERROR("KMS: Failed to initialize plane\n");
4196                 kfree(plane);
4197                 return ret;
4198         }
4199
4200         if (mode_info)
4201                 mode_info->planes[plane_id] = plane;
4202
4203         return ret;
4204 }
4205
4206
4207 static void register_backlight_device(struct amdgpu_display_manager *dm,
4208                                       struct dc_link *link)
4209 {
4210         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4211             link->type != dc_connection_none) {
4212                 /*
4213                  * Even if registration failed, we should continue with
4214                  * DM initialization because not having backlight control
4215                  * is better than a black screen.
4216                  */
4217                 if (!dm->backlight_dev[dm->num_of_edps])
4218                         amdgpu_dm_register_backlight_device(dm);
4219
4220                 if (dm->backlight_dev[dm->num_of_edps]) {
4221                         dm->backlight_link[dm->num_of_edps] = link;
4222                         dm->num_of_edps++;
4223                 }
4224         }
4225 }
4226
4227
4228 /*
4229  * In this architecture, the association
4230  * connector -> encoder -> crtc
4231  * is not really required. The crtc and connector will hold the
4232  * display_index as an abstraction to use with the DAL component.
4233  *
4234  * Returns 0 on success
4235  */
4236 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4237 {
4238         struct amdgpu_display_manager *dm = &adev->dm;
4239         int32_t i;
4240         struct amdgpu_dm_connector *aconnector = NULL;
4241         struct amdgpu_encoder *aencoder = NULL;
4242         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4243         uint32_t link_cnt;
4244         int32_t primary_planes;
4245         enum dc_connection_type new_connection_type = dc_connection_none;
4246         const struct dc_plane_cap *plane;
4247         bool psr_feature_enabled = false;
4248
4249         dm->display_indexes_num = dm->dc->caps.max_streams;
4250         /* Update the actual number of CRTCs used */
4251         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4252
4253         link_cnt = dm->dc->caps.max_links;
4254         if (amdgpu_dm_mode_config_init(dm->adev)) {
4255                 DRM_ERROR("DM: Failed to initialize mode config\n");
4256                 return -EINVAL;
4257         }
4258
4259         /* There is one primary plane per CRTC */
4260         primary_planes = dm->dc->caps.max_streams;
4261         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4262
4263         /*
4264          * Initialize primary planes, the implicit planes used by legacy IOCTLs.
4265          * Order is reversed to match iteration order in atomic check.
4266          */
4267         for (i = (primary_planes - 1); i >= 0; i--) {
4268                 plane = &dm->dc->caps.planes[i];
4269
4270                 if (initialize_plane(dm, mode_info, i,
4271                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4272                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4273                         goto fail;
4274                 }
4275         }
4276
4277         /*
4278          * Initialize overlay planes, index starting after primary planes.
4279          * These planes have a higher DRM index than the primary planes since
4280          * they should be considered as having a higher z-order.
4281          * Order is reversed to match iteration order in atomic check.
4282          *
4283          * Only support DCN for now, and only expose one so we don't encourage
4284          * userspace to use up all the pipes.
4285          */
4286         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4287                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4288
4289                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4290                         continue;
4291
4292                 if (!plane->blends_with_above || !plane->blends_with_below)
4293                         continue;
4294
4295                 if (!plane->pixel_format_support.argb8888)
4296                         continue;
4297
4298                 if (initialize_plane(dm, NULL, primary_planes + i,
4299                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4300                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4301                         goto fail;
4302                 }
4303
4304                 /* Only create one overlay plane. */
4305                 break;
4306         }
4307
4308         for (i = 0; i < dm->dc->caps.max_streams; i++)
4309                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4310                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4311                         goto fail;
4312                 }
4313
4314         /* Use Outbox interrupt */
4315         switch (adev->ip_versions[DCE_HWIP][0]) {
4316         case IP_VERSION(3, 0, 0):
4317         case IP_VERSION(3, 1, 2):
4318         case IP_VERSION(3, 1, 3):
4319         case IP_VERSION(3, 1, 4):
4320         case IP_VERSION(3, 1, 5):
4321         case IP_VERSION(3, 1, 6):
4322         case IP_VERSION(3, 2, 0):
4323         case IP_VERSION(3, 2, 1):
4324         case IP_VERSION(2, 1, 0):
4325                 if (register_outbox_irq_handlers(dm->adev)) {
4326                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4327                         goto fail;
4328                 }
4329                 break;
4330         default:
4331                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4332                               adev->ip_versions[DCE_HWIP][0]);
4333         }
4334
4335         /* Determine whether to enable PSR support by default. */
4336         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4337                 switch (adev->ip_versions[DCE_HWIP][0]) {
4338                 case IP_VERSION(3, 1, 2):
4339                 case IP_VERSION(3, 1, 3):
4340                 case IP_VERSION(3, 1, 4):
4341                 case IP_VERSION(3, 1, 5):
4342                 case IP_VERSION(3, 1, 6):
4343                 case IP_VERSION(3, 2, 0):
4344                 case IP_VERSION(3, 2, 1):
4345                         psr_feature_enabled = true;
4346                         break;
4347                 default:
4348                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4349                         break;
4350                 }
4351         }
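/*
 * Both masks come from boot-time module parameters (amdgpu.dcdebugmask and
 * amdgpu.dcfeaturemask, assuming the usual amdgpu parameter names), so PSR
 * can be forced off on the DCN 3.1/3.2 parts above via DC_DISABLE_PSR, or
 * opted into on older ASICs via DC_PSR_MASK, without rebuilding the driver.
 */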
4352
4353         /* Loop over all connectors on the board */
4354         for (i = 0; i < link_cnt; i++) {
4355                 struct dc_link *link = NULL;
4356
4357                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4358                         DRM_ERROR(
4359                                 "KMS: Cannot support more than %d display indexes\n",
4360                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4361                         continue;
4362                 }
4363
4364                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4365                 if (!aconnector)
4366                         goto fail;
4367
4368                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4369                 if (!aencoder)
4370                         goto fail;
4371
4372                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4373                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4374                         goto fail;
4375                 }
4376
4377                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4378                         DRM_ERROR("KMS: Failed to initialize connector\n");
4379                         goto fail;
4380                 }
4381
4382                 link = dc_get_link_at_index(dm->dc, i);
4383
4384                 if (!dc_link_detect_sink(link, &new_connection_type))
4385                         DRM_ERROR("KMS: Failed to detect connector\n");
4386
4387                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4388                         emulated_link_detect(link);
4389                         amdgpu_dm_update_connector_after_detect(aconnector);
4390                 } else {
4391                         bool ret = false;
4392
4393                         mutex_lock(&dm->dc_lock);
4394                         ret = dc_link_detect(link, DETECT_REASON_BOOT);
4395                         mutex_unlock(&dm->dc_lock);
4396
4397                         if (ret) {
4398                                 amdgpu_dm_update_connector_after_detect(aconnector);
4399                                 register_backlight_device(dm, link);
4400
4401                                 if (dm->num_of_edps)
4402                                         update_connector_ext_caps(aconnector);
4403
4404                                 if (psr_feature_enabled)
4405                                         amdgpu_dm_set_psr_caps(link);
4406
4407                                 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4408                                  * PSR is also supported.
4409                                  */
4410                                 if (link->psr_settings.psr_feature_enabled)
4411                                         adev_to_drm(adev)->vblank_disable_immediate = false;
4412                         }
4413                 }
4414         }
4415
4416         /* Software is initialized. Now we can register interrupt handlers. */
4417         switch (adev->asic_type) {
4418 #if defined(CONFIG_DRM_AMD_DC_SI)
4419         case CHIP_TAHITI:
4420         case CHIP_PITCAIRN:
4421         case CHIP_VERDE:
4422         case CHIP_OLAND:
4423                 if (dce60_register_irq_handlers(dm->adev)) {
4424                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4425                         goto fail;
4426                 }
4427                 break;
4428 #endif
4429         case CHIP_BONAIRE:
4430         case CHIP_HAWAII:
4431         case CHIP_KAVERI:
4432         case CHIP_KABINI:
4433         case CHIP_MULLINS:
4434         case CHIP_TONGA:
4435         case CHIP_FIJI:
4436         case CHIP_CARRIZO:
4437         case CHIP_STONEY:
4438         case CHIP_POLARIS11:
4439         case CHIP_POLARIS10:
4440         case CHIP_POLARIS12:
4441         case CHIP_VEGAM:
4442         case CHIP_VEGA10:
4443         case CHIP_VEGA12:
4444         case CHIP_VEGA20:
4445                 if (dce110_register_irq_handlers(dm->adev)) {
4446                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4447                         goto fail;
4448                 }
4449                 break;
4450         default:
4451                 switch (adev->ip_versions[DCE_HWIP][0]) {
4452                 case IP_VERSION(1, 0, 0):
4453                 case IP_VERSION(1, 0, 1):
4454                 case IP_VERSION(2, 0, 2):
4455                 case IP_VERSION(2, 0, 3):
4456                 case IP_VERSION(2, 0, 0):
4457                 case IP_VERSION(2, 1, 0):
4458                 case IP_VERSION(3, 0, 0):
4459                 case IP_VERSION(3, 0, 2):
4460                 case IP_VERSION(3, 0, 3):
4461                 case IP_VERSION(3, 0, 1):
4462                 case IP_VERSION(3, 1, 2):
4463                 case IP_VERSION(3, 1, 3):
4464                 case IP_VERSION(3, 1, 4):
4465                 case IP_VERSION(3, 1, 5):
4466                 case IP_VERSION(3, 1, 6):
4467                 case IP_VERSION(3, 2, 0):
4468                 case IP_VERSION(3, 2, 1):
4469                         if (dcn10_register_irq_handlers(dm->adev)) {
4470                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4471                                 goto fail;
4472                         }
4473                         break;
4474                 default:
4475                         DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4476                                         adev->ip_versions[DCE_HWIP][0]);
4477                         goto fail;
4478                 }
4479                 break;
4480         }
4481
4482         return 0;
4483 fail:
4484         kfree(aencoder);
4485         kfree(aconnector);
4486
4487         return -EINVAL;
4488 }
4489
4490 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4491 {
4492         drm_atomic_private_obj_fini(&dm->atomic_obj);
4493         return;
4494 }
4495
4496 /******************************************************************************
4497  * amdgpu_display_funcs functions
4498  *****************************************************************************/
4499
4500 /*
4501  * dm_bandwidth_update - program display watermarks
4502  *
4503  * @adev: amdgpu_device pointer
4504  *
4505  * Calculate and program the display watermarks and line buffer allocation.
4506  */
4507 static void dm_bandwidth_update(struct amdgpu_device *adev)
4508 {
4509         /* TODO: implement later */
4510 }
4511
4512 static const struct amdgpu_display_funcs dm_display_funcs = {
4513         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4514         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4515         .backlight_set_level = NULL, /* never called for DC */
4516         .backlight_get_level = NULL, /* never called for DC */
4517         .hpd_sense = NULL,/* called unconditionally */
4518         .hpd_set_polarity = NULL, /* called unconditionally */
4519         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4520         .page_flip_get_scanoutpos =
4521                 dm_crtc_get_scanoutpos,/* called unconditionally */
4522         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4523         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4524 };
4525
4526 #if defined(CONFIG_DEBUG_KERNEL_DC)
4527
4528 static ssize_t s3_debug_store(struct device *device,
4529                               struct device_attribute *attr,
4530                               const char *buf,
4531                               size_t count)
4532 {
4533         int ret;
4534         int s3_state;
4535         struct drm_device *drm_dev = dev_get_drvdata(device);
4536         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4537
4538         ret = kstrtoint(buf, 0, &s3_state);
4539
4540         if (ret == 0) {
4541                 if (s3_state) {
4542                         dm_resume(adev);
4543                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4544                 } else
4545                         dm_suspend(adev);
4546         }
4547
4548         return ret == 0 ? count : 0;
4549 }
4550
4551 DEVICE_ATTR_WO(s3_debug);
4552
4553 #endif
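/*
 * With CONFIG_DEBUG_KERNEL_DC set, the attribute above is attached to the
 * GPU's device node, so something like (path assumed for illustration)
 *   echo 0 > /sys/bus/pci/devices/0000:03:00.0/s3_debug
 * fakes a suspend, and writing 1 fakes a resume plus a hotplug event.
 */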
4554
4555 static int dm_early_init(void *handle)
4556 {
4557         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4558
4559         switch (adev->asic_type) {
4560 #if defined(CONFIG_DRM_AMD_DC_SI)
4561         case CHIP_TAHITI:
4562         case CHIP_PITCAIRN:
4563         case CHIP_VERDE:
4564                 adev->mode_info.num_crtc = 6;
4565                 adev->mode_info.num_hpd = 6;
4566                 adev->mode_info.num_dig = 6;
4567                 break;
4568         case CHIP_OLAND:
4569                 adev->mode_info.num_crtc = 2;
4570                 adev->mode_info.num_hpd = 2;
4571                 adev->mode_info.num_dig = 2;
4572                 break;
4573 #endif
4574         case CHIP_BONAIRE:
4575         case CHIP_HAWAII:
4576                 adev->mode_info.num_crtc = 6;
4577                 adev->mode_info.num_hpd = 6;
4578                 adev->mode_info.num_dig = 6;
4579                 break;
4580         case CHIP_KAVERI:
4581                 adev->mode_info.num_crtc = 4;
4582                 adev->mode_info.num_hpd = 6;
4583                 adev->mode_info.num_dig = 7;
4584                 break;
4585         case CHIP_KABINI:
4586         case CHIP_MULLINS:
4587                 adev->mode_info.num_crtc = 2;
4588                 adev->mode_info.num_hpd = 6;
4589                 adev->mode_info.num_dig = 6;
4590                 break;
4591         case CHIP_FIJI:
4592         case CHIP_TONGA:
4593                 adev->mode_info.num_crtc = 6;
4594                 adev->mode_info.num_hpd = 6;
4595                 adev->mode_info.num_dig = 7;
4596                 break;
4597         case CHIP_CARRIZO:
4598                 adev->mode_info.num_crtc = 3;
4599                 adev->mode_info.num_hpd = 6;
4600                 adev->mode_info.num_dig = 9;
4601                 break;
4602         case CHIP_STONEY:
4603                 adev->mode_info.num_crtc = 2;
4604                 adev->mode_info.num_hpd = 6;
4605                 adev->mode_info.num_dig = 9;
4606                 break;
4607         case CHIP_POLARIS11:
4608         case CHIP_POLARIS12:
4609                 adev->mode_info.num_crtc = 5;
4610                 adev->mode_info.num_hpd = 5;
4611                 adev->mode_info.num_dig = 5;
4612                 break;
4613         case CHIP_POLARIS10:
4614         case CHIP_VEGAM:
4615                 adev->mode_info.num_crtc = 6;
4616                 adev->mode_info.num_hpd = 6;
4617                 adev->mode_info.num_dig = 6;
4618                 break;
4619         case CHIP_VEGA10:
4620         case CHIP_VEGA12:
4621         case CHIP_VEGA20:
4622                 adev->mode_info.num_crtc = 6;
4623                 adev->mode_info.num_hpd = 6;
4624                 adev->mode_info.num_dig = 6;
4625                 break;
4626         default:
4627
4628                 switch (adev->ip_versions[DCE_HWIP][0]) {
4629                 case IP_VERSION(2, 0, 2):
4630                 case IP_VERSION(3, 0, 0):
4631                         adev->mode_info.num_crtc = 6;
4632                         adev->mode_info.num_hpd = 6;
4633                         adev->mode_info.num_dig = 6;
4634                         break;
4635                 case IP_VERSION(2, 0, 0):
4636                 case IP_VERSION(3, 0, 2):
4637                         adev->mode_info.num_crtc = 5;
4638                         adev->mode_info.num_hpd = 5;
4639                         adev->mode_info.num_dig = 5;
4640                         break;
4641                 case IP_VERSION(2, 0, 3):
4642                 case IP_VERSION(3, 0, 3):
4643                         adev->mode_info.num_crtc = 2;
4644                         adev->mode_info.num_hpd = 2;
4645                         adev->mode_info.num_dig = 2;
4646                         break;
4647                 case IP_VERSION(1, 0, 0):
4648                 case IP_VERSION(1, 0, 1):
4649                 case IP_VERSION(3, 0, 1):
4650                 case IP_VERSION(2, 1, 0):
4651                 case IP_VERSION(3, 1, 2):
4652                 case IP_VERSION(3, 1, 3):
4653                 case IP_VERSION(3, 1, 4):
4654                 case IP_VERSION(3, 1, 5):
4655                 case IP_VERSION(3, 1, 6):
4656                 case IP_VERSION(3, 2, 0):
4657                 case IP_VERSION(3, 2, 1):
4658                         adev->mode_info.num_crtc = 4;
4659                         adev->mode_info.num_hpd = 4;
4660                         adev->mode_info.num_dig = 4;
4661                         break;
4662                 default:
4663                         DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4664                                         adev->ip_versions[DCE_HWIP][0]);
4665                         return -EINVAL;
4666                 }
4667                 break;
4668         }
4669
4670         amdgpu_dm_set_irq_funcs(adev);
4671
4672         if (adev->mode_info.funcs == NULL)
4673                 adev->mode_info.funcs = &dm_display_funcs;
4674
4675         /*
4676          * Note: Do NOT change adev->audio_endpt_rreg and
4677          * adev->audio_endpt_wreg because they are initialised in
4678          * amdgpu_device_init()
4679          */
4680 #if defined(CONFIG_DEBUG_KERNEL_DC)
4681         device_create_file(
4682                 adev_to_drm(adev)->dev,
4683                 &dev_attr_s3_debug);
4684 #endif
4685
4686         return 0;
4687 }
4688
4689 static bool modeset_required(struct drm_crtc_state *crtc_state,
4690                              struct dc_stream_state *new_stream,
4691                              struct dc_stream_state *old_stream)
4692 {
4693         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4694 }
4695
4696 static bool modereset_required(struct drm_crtc_state *crtc_state)
4697 {
4698         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4699 }
4700
4701 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4702 {
4703         drm_encoder_cleanup(encoder);
4704         kfree(encoder);
4705 }
4706
4707 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4708         .destroy = amdgpu_dm_encoder_destroy,
4709 };
4710
4711
4712 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4713                                          struct drm_framebuffer *fb,
4714                                          int *min_downscale, int *max_upscale)
4715 {
4716         struct amdgpu_device *adev = drm_to_adev(dev);
4717         struct dc *dc = adev->dm.dc;
4718         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4719         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4720
4721         switch (fb->format->format) {
4722         case DRM_FORMAT_P010:
4723         case DRM_FORMAT_NV12:
4724         case DRM_FORMAT_NV21:
4725                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4726                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4727                 break;
4728
4729         case DRM_FORMAT_XRGB16161616F:
4730         case DRM_FORMAT_ARGB16161616F:
4731         case DRM_FORMAT_XBGR16161616F:
4732         case DRM_FORMAT_ABGR16161616F:
4733                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4734                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4735                 break;
4736
4737         default:
4738                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4739                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4740                 break;
4741         }
4742
4743         /*
4744          * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4745          * scaling factor of 1.0 == 1000 units.
4746          */
4747         if (*max_upscale == 1)
4748                 *max_upscale = 1000;
4749
4750         if (*min_downscale == 1)
4751                 *min_downscale = 1000;
4752 }
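/*
 * The returned factors are thus in thousandths of the source size: e.g. a
 * min_downscale of 250 permits shrinking to 25% and a max_upscale of 16000
 * permits a 16x enlargement (the same values used as fallback defaults in
 * fill_dc_scaling_info() below).
 */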
4753
4754
4755 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4756                                 const struct drm_plane_state *state,
4757                                 struct dc_scaling_info *scaling_info)
4758 {
4759         int scale_w, scale_h, min_downscale, max_upscale;
4760
4761         memset(scaling_info, 0, sizeof(*scaling_info));
4762
4763         /* Source is in fixed-point 16.16, but we ignore the fractional part for now... */
4764         scaling_info->src_rect.x = state->src_x >> 16;
4765         scaling_info->src_rect.y = state->src_y >> 16;
4766
4767         /*
4768          * For reasons we don't (yet) fully understand, a non-zero
4769          * src_y coordinate into an NV12 buffer can cause a
4770          * system hang on DCN1x.
4771          * To avoid hangs (and maybe be overly cautious)
4772          * let's reject both non-zero src_x and src_y.
4773          *
4774          * We currently know of only one use-case to reproduce a
4775          * scenario with non-zero src_x and src_y for NV12, which
4776          * is to gesture the YouTube Android app into full screen
4777          * on ChromeOS.
4778          */
4779         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4780             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4781             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4782             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4783                 return -EINVAL;
4784
4785         scaling_info->src_rect.width = state->src_w >> 16;
4786         if (scaling_info->src_rect.width == 0)
4787                 return -EINVAL;
4788
4789         scaling_info->src_rect.height = state->src_h >> 16;
4790         if (scaling_info->src_rect.height == 0)
4791                 return -EINVAL;
4792
4793         scaling_info->dst_rect.x = state->crtc_x;
4794         scaling_info->dst_rect.y = state->crtc_y;
4795
4796         if (state->crtc_w == 0)
4797                 return -EINVAL;
4798
4799         scaling_info->dst_rect.width = state->crtc_w;
4800
4801         if (state->crtc_h == 0)
4802                 return -EINVAL;
4803
4804         scaling_info->dst_rect.height = state->crtc_h;
4805
4806         /* DRM doesn't specify clipping on destination output. */
4807         scaling_info->clip_rect = scaling_info->dst_rect;
4808
4809         /* Validate scaling per-format with DC plane caps */
4810         if (state->plane && state->plane->dev && state->fb) {
4811                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4812                                              &min_downscale, &max_upscale);
4813         } else {
4814                 min_downscale = 250;
4815                 max_upscale = 16000;
4816         }
4817
4818         scale_w = scaling_info->dst_rect.width * 1000 /
4819                   scaling_info->src_rect.width;
4820
4821         if (scale_w < min_downscale || scale_w > max_upscale)
4822                 return -EINVAL;
4823
4824         scale_h = scaling_info->dst_rect.height * 1000 /
4825                   scaling_info->src_rect.height;
4826
4827         if (scale_h < min_downscale || scale_h > max_upscale)
4828                 return -EINVAL;
4829
4830         /*
4831          * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4832          * assume reasonable defaults based on the format.
4833          */
4834
4835         return 0;
4836 }
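/*
 * Example (sizes assumed for illustration): a 1920-pixel-wide source shown
 * in a 960-pixel-wide destination gives scale_w == 960 * 1000 / 1920 == 500,
 * i.e. a 0.5x downscale, which is accepted only if min_downscale <= 500.
 */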
4837
4838 static void
4839 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4840                                  uint64_t tiling_flags)
4841 {
4842         /* Fill GFX8 params */
4843         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4844                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4845
4846                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4847                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4848                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4849                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4850                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4851
4852                 /* XXX fix me for VI */
4853                 tiling_info->gfx8.num_banks = num_banks;
4854                 tiling_info->gfx8.array_mode =
4855                                 DC_ARRAY_2D_TILED_THIN1;
4856                 tiling_info->gfx8.tile_split = tile_split;
4857                 tiling_info->gfx8.bank_width = bankw;
4858                 tiling_info->gfx8.bank_height = bankh;
4859                 tiling_info->gfx8.tile_aspect = mtaspect;
4860                 tiling_info->gfx8.tile_mode =
4861                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4862         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4863                         == DC_ARRAY_1D_TILED_THIN1) {
4864                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4865         }
4866
4867         tiling_info->gfx8.pipe_config =
4868                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4869 }
4870
4871 static void
4872 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4873                                   union dc_tiling_info *tiling_info)
4874 {
4875         tiling_info->gfx9.num_pipes =
4876                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4877         tiling_info->gfx9.num_banks =
4878                 adev->gfx.config.gb_addr_config_fields.num_banks;
4879         tiling_info->gfx9.pipe_interleave =
4880                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4881         tiling_info->gfx9.num_shader_engines =
4882                 adev->gfx.config.gb_addr_config_fields.num_se;
4883         tiling_info->gfx9.max_compressed_frags =
4884                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4885         tiling_info->gfx9.num_rb_per_se =
4886                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4887         tiling_info->gfx9.shaderEnable = 1;
4888         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4889                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4890 }
4891
4892 static int
4893 validate_dcc(struct amdgpu_device *adev,
4894              const enum surface_pixel_format format,
4895              const enum dc_rotation_angle rotation,
4896              const union dc_tiling_info *tiling_info,
4897              const struct dc_plane_dcc_param *dcc,
4898              const struct dc_plane_address *address,
4899              const struct plane_size *plane_size)
4900 {
4901         struct dc *dc = adev->dm.dc;
4902         struct dc_dcc_surface_param input;
4903         struct dc_surface_dcc_cap output;
4904
4905         memset(&input, 0, sizeof(input));
4906         memset(&output, 0, sizeof(output));
4907
4908         if (!dcc->enable)
4909                 return 0;
4910
4911         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4912             !dc->cap_funcs.get_dcc_compression_cap)
4913                 return -EINVAL;
4914
4915         input.format = format;
4916         input.surface_size.width = plane_size->surface_size.width;
4917         input.surface_size.height = plane_size->surface_size.height;
4918         input.swizzle_mode = tiling_info->gfx9.swizzle;
4919
4920         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4921                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4922         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4923                 input.scan = SCAN_DIRECTION_VERTICAL;
4924
4925         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4926                 return -EINVAL;
4927
4928         if (!output.capable)
4929                 return -EINVAL;
4930
4931         if (dcc->independent_64b_blks == 0 &&
4932             output.grph.rgb.independent_64b_blks != 0)
4933                 return -EINVAL;
4934
4935         return 0;
4936 }
4937
4938 static bool
4939 modifier_has_dcc(uint64_t modifier)
4940 {
4941         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4942 }
4943
4944 static unsigned
4945 modifier_gfx9_swizzle_mode(uint64_t modifier)
4946 {
4947         if (modifier == DRM_FORMAT_MOD_LINEAR)
4948                 return 0;
4949
4950         return AMD_FMT_MOD_GET(TILE, modifier);
4951 }
4952
4953 static const struct drm_format_info *
4954 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4955 {
4956         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4957 }
4958
4959 static void
4960 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4961                                     union dc_tiling_info *tiling_info,
4962                                     uint64_t modifier)
4963 {
4964         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4965         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4966         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4967         unsigned int pipes_log2;
4968
4969         pipes_log2 = min(5u, mod_pipe_xor_bits);
4970
4971         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4972
4973         if (!IS_AMD_FMT_MOD(modifier))
4974                 return;
4975
4976         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4977         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4978
4979         if (adev->family >= AMDGPU_FAMILY_NV) {
4980                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4981         } else {
4982                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4983
4984                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4985         }
4986 }
4987
4988 enum dm_micro_swizzle {
4989         MICRO_SWIZZLE_Z = 0,
4990         MICRO_SWIZZLE_S = 1,
4991         MICRO_SWIZZLE_D = 2,
4992         MICRO_SWIZZLE_R = 3
4993 };
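/*
 * The two low bits of a GFX9+ swizzle mode encode its micro-tile variant,
 * e.g. AMD_FMT_MOD_TILE_GFX9_64K_S_X & 3 == MICRO_SWIZZLE_S and
 * AMD_FMT_MOD_TILE_GFX9_64K_R_X & 3 == MICRO_SWIZZLE_R, which is why
 * dm_plane_format_mod_supported() below masks modifier_gfx9_swizzle_mode()
 * with 3.
 */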
4994
4995 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4996                                           uint32_t format,
4997                                           uint64_t modifier)
4998 {
4999         struct amdgpu_device *adev = drm_to_adev(plane->dev);
5000         const struct drm_format_info *info = drm_format_info(format);
5001         int i;
5002
5003         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
5004
5005         if (!info)
5006                 return false;
5007
5008         /*
5009          * We always have to allow these modifiers:
5010          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
5011          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
5012          */
5013         if (modifier == DRM_FORMAT_MOD_LINEAR ||
5014             modifier == DRM_FORMAT_MOD_INVALID) {
5015                 return true;
5016         }
5017
5018         /* Check that the modifier is on the list of the plane's supported modifiers. */
5019         for (i = 0; i < plane->modifier_count; i++) {
5020                 if (modifier == plane->modifiers[i])
5021                         break;
5022         }
5023         if (i == plane->modifier_count)
5024                 return false;
5025
5026         /*
5027          * For D swizzle the canonical modifier depends on the bpp, so check
5028          * it here.
5029          */
5030         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
5031             adev->family >= AMDGPU_FAMILY_NV) {
5032                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
5033                         return false;
5034         }
5035
5036         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
5037             info->cpp[0] < 8)
5038                 return false;
5039
5040         if (modifier_has_dcc(modifier)) {
5041                 /* Per radeonsi comments 16/64 bpp are more complicated. */
5042                 if (info->cpp[0] != 4)
5043                         return false;
5044                 /* We support multi-planar formats, but not when combined with
5045                  * additional DCC metadata planes. */
5046                 if (info->num_planes > 1)
5047                         return false;
5048         }
5049
5050         return true;
5051 }
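/*
 * In practice (formats named for illustration): a DCC modifier paired with
 * DRM_FORMAT_XRGB8888 (cpp 4, single plane) can pass the checks above,
 * while DCC with RGB565 (cpp 2) or NV12 (two planes) is always rejected.
 */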
5052
5053 static void
5054 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
5055 {
5056         if (!*mods)
5057                 return;
5058
5059         if (*cap - *size < 1) {
5060                 uint64_t new_cap = *cap * 2;
5061                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
5062
5063                 if (!new_mods) {
5064                         kfree(*mods);
5065                         *mods = NULL;
5066                         return;
5067                 }
5068
5069                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
5070                 kfree(*mods);
5071                 *mods = new_mods;
5072                 *cap = new_cap;
5073         }
5074
5075         (*mods)[*size] = mod;
5076         *size += 1;
5077 }
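/*
 * The list grows geometrically (capacity doubles when full), so building N
 * modifiers costs O(N) amortized copies. On allocation failure the list is
 * freed and *mods set to NULL, which get_plane_modifiers() below reports as
 * -ENOMEM.
 */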
5078
5079 static void
5080 add_gfx9_modifiers(const struct amdgpu_device *adev,
5081                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
5082 {
5083         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5084         int pipe_xor_bits = min(8, pipes +
5085                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5086         int bank_xor_bits = min(8 - pipe_xor_bits,
5087                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5088         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5089                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5090
5091
5092         if (adev->family == AMDGPU_FAMILY_RV) {
5093                 /* Raven2 and later */
5094                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5095
5096                 /*
5097                  * No _D DCC swizzles yet because we only allow 32bpp, which
5098                  * doesn't support _D on DCN
5099                  */
5100
5101                 if (has_constant_encode) {
5102                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5103                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5104                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5105                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5106                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5107                                     AMD_FMT_MOD_SET(DCC, 1) |
5108                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5109                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5110                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5111                 }
5112
5113                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5114                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5115                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5116                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5117                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5118                             AMD_FMT_MOD_SET(DCC, 1) |
5119                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5120                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5121                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5122
5123                 if (has_constant_encode) {
5124                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5126                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5127                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5128                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5129                                     AMD_FMT_MOD_SET(DCC, 1) |
5130                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5131                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5132                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5133
5134                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5135                                     AMD_FMT_MOD_SET(RB, rb) |
5136                                     AMD_FMT_MOD_SET(PIPE, pipes));
5137                 }
5138
5139                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5140                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5141                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5142                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5143                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5144                             AMD_FMT_MOD_SET(DCC, 1) |
5145                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5146                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5147                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5148                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5149                             AMD_FMT_MOD_SET(RB, rb) |
5150                             AMD_FMT_MOD_SET(PIPE, pipes));
5151         }
5152
5153         /*
5154          * Only supported for 64bpp on Raven, will be filtered on format in
5155          * dm_plane_format_mod_supported.
5156          */
5157         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5158                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5159                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5160                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5161                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5162
5163         if (adev->family == AMDGPU_FAMILY_RV) {
5164                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5165                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5166                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5167                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5168                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5169         }
5170
5171         /*
5172          * Only supported for 64bpp on Raven, will be filtered on format in
5173          * dm_plane_format_mod_supported.
5174          */
5175         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5176                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5177                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5178
5179         if (adev->family == AMDGPU_FAMILY_RV) {
5180                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5181                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5182                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5183         }
5184 }
5185
5186 static void
5187 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5188                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5189 {
5190         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5191
5192         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5193                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5194                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5195                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5196                     AMD_FMT_MOD_SET(DCC, 1) |
5197                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5198                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5199                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5200
5201         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5202                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5203                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5204                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5205                     AMD_FMT_MOD_SET(DCC, 1) |
5206                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5207                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5208                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5209                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5210
5211         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5212                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5213                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5214                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5215
5216         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5217                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5218                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5219                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5220
5221
5222         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5223         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5224                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5225                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5226
5227         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5228                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5229                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5230 }
5231
5232 static void
5233 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5234                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5235 {
5236         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5237         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5238
5239         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5240                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5241                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5242                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5243                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5244                     AMD_FMT_MOD_SET(DCC, 1) |
5245                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5246                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5247                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5248                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5249
5250         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5251                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5252                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5253                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5254                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5255                     AMD_FMT_MOD_SET(DCC, 1) |
5256                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5257                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5258                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5259
5260         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5261                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5262                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5263                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5264                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5265                     AMD_FMT_MOD_SET(DCC, 1) |
5266                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5267                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5268                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5269                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5270                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5271
5272         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5273                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5274                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5275                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5276                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5277                     AMD_FMT_MOD_SET(DCC, 1) |
5278                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5279                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5280                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5281                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5282
5283         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5284                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5285                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5286                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5287                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5288
5289         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5290                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5291                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5292                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5293                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5294
5295         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5296         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5297                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5298                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5299
5300         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5301                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5302                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5303 }
5304
5305 static void
5306 add_gfx11_modifiers(struct amdgpu_device *adev,
5307                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5308 {
5309         int num_pipes = 0;
5310         int pipe_xor_bits = 0;
5311         int num_pkrs = 0;
5312         int pkrs = 0;
5313         u32 gb_addr_config;
5314         u8 i = 0;
5315         unsigned swizzle_r_x;
5316         uint64_t modifier_r_x;
5317         uint64_t modifier_dcc_best;
5318         uint64_t modifier_dcc_4k;
5319
5320         /* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
5321          * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
5322         gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5323         ASSERT(gb_addr_config != 0);
5324
5325         num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5326         pkrs = ilog2(num_pkrs);
5327         num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5328         pipe_xor_bits = ilog2(num_pipes);
5329
5330         for (i = 0; i < 2; i++) {
5331                 /* Insert the best one first. */
5332                 /* R_X swizzle modes are the best for rendering and DCC requires them. */
5333                 if (num_pipes > 16)
5334                         swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5335                 else
5336                         swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
5337
5338                 modifier_r_x = AMD_FMT_MOD |
5339                                AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5340                                AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5341                                AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5342                                AMD_FMT_MOD_SET(PACKERS, pkrs);
5343
5344                 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5345                 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5346                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5347                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5348                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5349
5350                 /* DCC settings for 4K and greater resolutions. (required by display hw) */
5351                 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5352                                   AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5353                                   AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5354                                   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5355
5356                 add_modifier(mods, size, capacity, modifier_dcc_best);
5357                 add_modifier(mods, size, capacity, modifier_dcc_4k);
5358
5359                 add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5360                 add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5361
5362                 add_modifier(mods, size, capacity, modifier_r_x);
5363         }
5364
5365         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5366                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5367                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5368 }
5369
5370 static int
5371 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5372 {
5373         uint64_t size = 0, capacity = 128;
5374         *mods = NULL;
5375
5376         /* We have not hooked up any pre-GFX9 modifiers. */
5377         if (adev->family < AMDGPU_FAMILY_AI)
5378                 return 0;
5379
5380         *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
5381
5382         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5383                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5384                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5385                 return *mods ? 0 : -ENOMEM;
5386         }
5387
5388         switch (adev->family) {
5389         case AMDGPU_FAMILY_AI:
5390         case AMDGPU_FAMILY_RV:
5391                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5392                 break;
5393         case AMDGPU_FAMILY_NV:
5394         case AMDGPU_FAMILY_VGH:
5395         case AMDGPU_FAMILY_YC:
5396         case AMDGPU_FAMILY_GC_10_3_6:
5397         case AMDGPU_FAMILY_GC_10_3_7:
5398                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5399                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5400                 else
5401                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5402                 break;
5403         case AMDGPU_FAMILY_GC_11_0_0:
5404         case AMDGPU_FAMILY_GC_11_0_2:
5405                 add_gfx11_modifiers(adev, mods, &size, &capacity);
5406                 break;
5407         }
5408
5409         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5410
5411         /* INVALID marks the end of the list. */
5412         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5413
5414         if (!*mods)
5415                 return -ENOMEM;
5416
5417         return 0;
5418 }
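/*
 * For reference, a minimal sketch of the grow-and-append pattern behind
 * add_modifier() (defined earlier in this file). The doubling policy and
 * NULL handling here are illustrative assumptions, not the exact helper,
 * hence the #if 0:
 */
#if 0
static void add_modifier_sketch(uint64_t **mods, uint64_t *size,
				uint64_t *capacity, uint64_t mod)
{
	if (!*mods)
		return; /* an earlier allocation failed; caller reports -ENOMEM */

	if (*size == *capacity) {
		/* Double the backing array; on failure, poison the list. */
		uint64_t *new_mods = krealloc(*mods,
					      *capacity * 2 * sizeof(uint64_t),
					      GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}
		*mods = new_mods;
		*capacity *= 2;
	}

	(*mods)[(*size)++] = mod;
}
#endif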
5419
5420 static int
5421 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5422                                           const struct amdgpu_framebuffer *afb,
5423                                           const enum surface_pixel_format format,
5424                                           const enum dc_rotation_angle rotation,
5425                                           const struct plane_size *plane_size,
5426                                           union dc_tiling_info *tiling_info,
5427                                           struct dc_plane_dcc_param *dcc,
5428                                           struct dc_plane_address *address,
5429                                           const bool force_disable_dcc)
5430 {
5431         const uint64_t modifier = afb->base.modifier;
5432         int ret = 0;
5433
5434         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5435         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5436
5437         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5438                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5439                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5440                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5441
5442                 dcc->enable = 1;
5443                 dcc->meta_pitch = afb->base.pitches[1];
5444                 dcc->independent_64b_blks = independent_64b_blks;
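                /*
                 * On GFX10 RB+ and newer tile versions, the branches below
                 * map the independent-block flags as follows:
                 *   64B + 128B -> hubp_ind_block_64b_no_128bcl
                 *   128B only  -> hubp_ind_block_128b
                 *   64B only   -> hubp_ind_block_64b
                 *   neither    -> hubp_ind_block_unconstrained
                 */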
5445                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5446                         if (independent_64b_blks && independent_128b_blks)
5447                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5448                         else if (independent_128b_blks)
5449                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5450                         else if (independent_64b_blks && !independent_128b_blks)
5451                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5452                         else
5453                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5454                 } else {
5455                         if (independent_64b_blks)
5456                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5457                         else
5458                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5459                 }
5460
5461                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5462                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5463         }
5464
5465         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5466         if (ret)
5467                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5468
5469         return ret;
5470 }
5471
5472 static int
5473 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5474                              const struct amdgpu_framebuffer *afb,
5475                              const enum surface_pixel_format format,
5476                              const enum dc_rotation_angle rotation,
5477                              const uint64_t tiling_flags,
5478                              union dc_tiling_info *tiling_info,
5479                              struct plane_size *plane_size,
5480                              struct dc_plane_dcc_param *dcc,
5481                              struct dc_plane_address *address,
5482                              bool tmz_surface,
5483                              bool force_disable_dcc)
5484 {
5485         const struct drm_framebuffer *fb = &afb->base;
5486         int ret;
5487
5488         memset(tiling_info, 0, sizeof(*tiling_info));
5489         memset(plane_size, 0, sizeof(*plane_size));
5490         memset(dcc, 0, sizeof(*dcc));
5491         memset(address, 0, sizeof(*address));
5492
5493         address->tmz_surface = tmz_surface;
5494
5495         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5496                 uint64_t addr = afb->address + fb->offsets[0];
5497
5498                 plane_size->surface_size.x = 0;
5499                 plane_size->surface_size.y = 0;
5500                 plane_size->surface_size.width = fb->width;
5501                 plane_size->surface_size.height = fb->height;
5502                 plane_size->surface_pitch =
5503                         fb->pitches[0] / fb->format->cpp[0];
5504
5505                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5506                 address->grph.addr.low_part = lower_32_bits(addr);
5507                 address->grph.addr.high_part = upper_32_bits(addr);
5508         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5509                 uint64_t luma_addr = afb->address + fb->offsets[0];
5510                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5511
5512                 plane_size->surface_size.x = 0;
5513                 plane_size->surface_size.y = 0;
5514                 plane_size->surface_size.width = fb->width;
5515                 plane_size->surface_size.height = fb->height;
5516                 plane_size->surface_pitch =
5517                         fb->pitches[0] / fb->format->cpp[0];
5518
5519                 plane_size->chroma_size.x = 0;
5520                 plane_size->chroma_size.y = 0;
5521                 /* TODO: set these based on surface format */
5522                 plane_size->chroma_size.width = fb->width / 2;
5523                 plane_size->chroma_size.height = fb->height / 2;
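                /*
                 * e.g. a 1920x1080 NV12 surface has a full-size luma plane
                 * and a 2x2-subsampled chroma plane of 960x540.
                 */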
5524
5525                 plane_size->chroma_pitch =
5526                         fb->pitches[1] / fb->format->cpp[1];
5527
5528                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5529                 address->video_progressive.luma_addr.low_part =
5530                         lower_32_bits(luma_addr);
5531                 address->video_progressive.luma_addr.high_part =
5532                         upper_32_bits(luma_addr);
5533                 address->video_progressive.chroma_addr.low_part =
5534                         lower_32_bits(chroma_addr);
5535                 address->video_progressive.chroma_addr.high_part =
5536                         upper_32_bits(chroma_addr);
5537         }
5538
5539         if (adev->family >= AMDGPU_FAMILY_AI) {
5540                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5541                                                                 rotation, plane_size,
5542                                                                 tiling_info, dcc,
5543                                                                 address,
5544                                                                 force_disable_dcc);
5545                 if (ret)
5546                         return ret;
5547         } else {
5548                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5549         }
5550
5551         return 0;
5552 }
5553
5554 static void
5555 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5556                                bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5557                                bool *global_alpha, int *global_alpha_value)
5558 {
5559         *per_pixel_alpha = false;
5560         *pre_multiplied_alpha = true;
5561         *global_alpha = false;
5562         *global_alpha_value = 0xff;
5563
5564         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5565                 return;
5566
5567         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5568                 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5569                 static const uint32_t alpha_formats[] = {
5570                         DRM_FORMAT_ARGB8888,
5571                         DRM_FORMAT_RGBA8888,
5572                         DRM_FORMAT_ABGR8888,
5573                 };
5574                 uint32_t format = plane_state->fb->format->format;
5575                 unsigned int i;
5576
5577                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5578                         if (format == alpha_formats[i]) {
5579                                 *per_pixel_alpha = true;
5580                                 break;
5581                         }
5582                 }
5583
5584                 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5585                         *pre_multiplied_alpha = false;
5586         }
5587
5588         if (plane_state->alpha < 0xffff) {
5589                 *global_alpha = true;
5590                 *global_alpha_value = plane_state->alpha >> 8;
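                /* e.g. a DRM plane alpha of 0x8000 (~50%) becomes a DC global alpha of 0x80 */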
5591         }
5592 }
5593
5594 static int
5595 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5596                             const enum surface_pixel_format format,
5597                             enum dc_color_space *color_space)
5598 {
5599         bool full_range;
5600
5601         *color_space = COLOR_SPACE_SRGB;
5602
5603         /* DRM color properties only affect non-RGB formats. */
5604         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5605                 return 0;
5606
5607         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5608
5609         switch (plane_state->color_encoding) {
5610         case DRM_COLOR_YCBCR_BT601:
5611                 if (full_range)
5612                         *color_space = COLOR_SPACE_YCBCR601;
5613                 else
5614                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5615                 break;
5616
5617         case DRM_COLOR_YCBCR_BT709:
5618                 if (full_range)
5619                         *color_space = COLOR_SPACE_YCBCR709;
5620                 else
5621                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5622                 break;
5623
5624         case DRM_COLOR_YCBCR_BT2020:
5625                 if (full_range)
5626                         *color_space = COLOR_SPACE_2020_YCBCR;
5627                 else
5628                         return -EINVAL;
5629                 break;
5630
5631         default:
5632                 return -EINVAL;
5633         }
5634
5635         return 0;
5636 }
5637
5638 static int
5639 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5640                             const struct drm_plane_state *plane_state,
5641                             const uint64_t tiling_flags,
5642                             struct dc_plane_info *plane_info,
5643                             struct dc_plane_address *address,
5644                             bool tmz_surface,
5645                             bool force_disable_dcc)
5646 {
5647         const struct drm_framebuffer *fb = plane_state->fb;
5648         const struct amdgpu_framebuffer *afb =
5649                 to_amdgpu_framebuffer(plane_state->fb);
5650         int ret;
5651
5652         memset(plane_info, 0, sizeof(*plane_info));
5653
5654         switch (fb->format->format) {
5655         case DRM_FORMAT_C8:
5656                 plane_info->format =
5657                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5658                 break;
5659         case DRM_FORMAT_RGB565:
5660                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5661                 break;
5662         case DRM_FORMAT_XRGB8888:
5663         case DRM_FORMAT_ARGB8888:
5664                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5665                 break;
5666         case DRM_FORMAT_XRGB2101010:
5667         case DRM_FORMAT_ARGB2101010:
5668                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5669                 break;
5670         case DRM_FORMAT_XBGR2101010:
5671         case DRM_FORMAT_ABGR2101010:
5672                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5673                 break;
5674         case DRM_FORMAT_XBGR8888:
5675         case DRM_FORMAT_ABGR8888:
5676                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5677                 break;
5678         case DRM_FORMAT_NV21:
5679                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5680                 break;
5681         case DRM_FORMAT_NV12:
5682                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5683                 break;
5684         case DRM_FORMAT_P010:
5685                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5686                 break;
5687         case DRM_FORMAT_XRGB16161616F:
5688         case DRM_FORMAT_ARGB16161616F:
5689                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5690                 break;
5691         case DRM_FORMAT_XBGR16161616F:
5692         case DRM_FORMAT_ABGR16161616F:
5693                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5694                 break;
5695         case DRM_FORMAT_XRGB16161616:
5696         case DRM_FORMAT_ARGB16161616:
5697                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5698                 break;
5699         case DRM_FORMAT_XBGR16161616:
5700         case DRM_FORMAT_ABGR16161616:
5701                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5702                 break;
5703         default:
5704                 DRM_ERROR(
5705                         "Unsupported screen format %p4cc\n",
5706                         &fb->format->format);
5707                 return -EINVAL;
5708         }
5709
5710         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5711         case DRM_MODE_ROTATE_0:
5712                 plane_info->rotation = ROTATION_ANGLE_0;
5713                 break;
5714         case DRM_MODE_ROTATE_90:
5715                 plane_info->rotation = ROTATION_ANGLE_90;
5716                 break;
5717         case DRM_MODE_ROTATE_180:
5718                 plane_info->rotation = ROTATION_ANGLE_180;
5719                 break;
5720         case DRM_MODE_ROTATE_270:
5721                 plane_info->rotation = ROTATION_ANGLE_270;
5722                 break;
5723         default:
5724                 plane_info->rotation = ROTATION_ANGLE_0;
5725                 break;
5726         }
5727
5728         plane_info->visible = true;
5729         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5730
5731         plane_info->layer_index = 0;
5732
5733         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5734                                           &plane_info->color_space);
5735         if (ret)
5736                 return ret;
5737
5738         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5739                                            plane_info->rotation, tiling_flags,
5740                                            &plane_info->tiling_info,
5741                                            &plane_info->plane_size,
5742                                            &plane_info->dcc, address, tmz_surface,
5743                                            force_disable_dcc);
5744         if (ret)
5745                 return ret;
5746
5747         fill_blending_from_plane_state(
5748                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5749                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5750
5751         return 0;
5752 }
5753
5754 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5755                                     struct dc_plane_state *dc_plane_state,
5756                                     struct drm_plane_state *plane_state,
5757                                     struct drm_crtc_state *crtc_state)
5758 {
5759         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5760         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5761         struct dc_scaling_info scaling_info;
5762         struct dc_plane_info plane_info;
5763         int ret;
5764         bool force_disable_dcc = false;
5765
5766         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5767         if (ret)
5768                 return ret;
5769
5770         dc_plane_state->src_rect = scaling_info.src_rect;
5771         dc_plane_state->dst_rect = scaling_info.dst_rect;
5772         dc_plane_state->clip_rect = scaling_info.clip_rect;
5773         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5774
5775         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5776         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5777                                           afb->tiling_flags,
5778                                           &plane_info,
5779                                           &dc_plane_state->address,
5780                                           afb->tmz_surface,
5781                                           force_disable_dcc);
5782         if (ret)
5783                 return ret;
5784
5785         dc_plane_state->format = plane_info.format;
5786         dc_plane_state->color_space = plane_info.color_space;
5788         dc_plane_state->plane_size = plane_info.plane_size;
5789         dc_plane_state->rotation = plane_info.rotation;
5790         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5791         dc_plane_state->stereo_format = plane_info.stereo_format;
5792         dc_plane_state->tiling_info = plane_info.tiling_info;
5793         dc_plane_state->visible = plane_info.visible;
5794         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5795         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5796         dc_plane_state->global_alpha = plane_info.global_alpha;
5797         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5798         dc_plane_state->dcc = plane_info.dcc;
5799         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5800         dc_plane_state->flip_int_enabled = true;
5801
5802         /*
5803          * Always set input transfer function, since plane state is refreshed
5804          * every time.
5805          */
5806         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5807         if (ret)
5808                 return ret;
5809
5810         return 0;
5811 }
5812
5813 /**
5814  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5815  *
5816  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5817  *         remote fb
5818  * @old_plane_state: Old state of @plane
5819  * @new_plane_state: New state of @plane
5820  * @crtc_state: New state of CRTC connected to the @plane
5821  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5822  *
5823  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5824  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5825  * the eDP remote buffer. The responsibility of specifying the dirty regions is
5826  * amdgpu_dm's.
5827  *
5828  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5829  * plane with regions that require flushing to the eDP remote buffer. In
5830  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5831  * implicitly provide damage clips without any client support via the plane
5832  * bounds.
5833  *
5834  * Today, amdgpu_dm only supports the MPO and cursor use cases.
5835  *
5836  * TODO: Also enable for FB_DAMAGE_CLIPS
5837  */
5838 static void fill_dc_dirty_rects(struct drm_plane *plane,
5839                                 struct drm_plane_state *old_plane_state,
5840                                 struct drm_plane_state *new_plane_state,
5841                                 struct drm_crtc_state *crtc_state,
5842                                 struct dc_flip_addrs *flip_addrs)
5843 {
5844         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5845         struct rect *dirty_rects = flip_addrs->dirty_rects;
5846         uint32_t num_clips;
5847         bool bb_changed;
5848         bool fb_changed;
5849         uint32_t i = 0;
5850
5851         flip_addrs->dirty_rect_count = 0;
5852
5853         /*
5854          * The cursor plane has its own dirty rect update interface. See
5855          * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5856          */
5857         if (plane->type == DRM_PLANE_TYPE_CURSOR)
5858                 return;
5859
5860         /*
5861          * Today, we only consider the MPO use case for PSR SU. If MPO is not
5862          * requested and there is a plane update, do a full-frame update (FFU).
5863          */
5864         if (!dm_crtc_state->mpo_requested) {
5865                 dirty_rects[0].x = 0;
5866                 dirty_rects[0].y = 0;
5867                 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5868                 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5869                 flip_addrs->dirty_rect_count = 1;
5870                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5871                                  new_plane_state->plane->base.id,
5872                                  dm_crtc_state->base.mode.crtc_hdisplay,
5873                                  dm_crtc_state->base.mode.crtc_vdisplay);
5874                 return;
5875         }
5876
5877         /*
5878          * MPO is requested. Add entire plane bounding box to dirty rects if
5879          * flipped to or damaged.
5880          *
5881          * If plane is moved or resized, also add old bounding box to dirty
5882          * rects.
5883          */
5884         num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5885         fb_changed = old_plane_state->fb->base.id !=
5886                      new_plane_state->fb->base.id;
5887         bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5888                       old_plane_state->crtc_y != new_plane_state->crtc_y ||
5889                       old_plane_state->crtc_w != new_plane_state->crtc_w ||
5890                       old_plane_state->crtc_h != new_plane_state->crtc_h);
5891
5892         DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5893                          new_plane_state->plane->base.id,
5894                          bb_changed, fb_changed, num_clips);
5895
5896         if (num_clips || fb_changed || bb_changed) {
5897                 dirty_rects[i].x = new_plane_state->crtc_x;
5898                 dirty_rects[i].y = new_plane_state->crtc_y;
5899                 dirty_rects[i].width = new_plane_state->crtc_w;
5900                 dirty_rects[i].height = new_plane_state->crtc_h;
5901                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5902                                  new_plane_state->plane->base.id,
5903                                  dirty_rects[i].x, dirty_rects[i].y,
5904                                  dirty_rects[i].width, dirty_rects[i].height);
5905                 i += 1;
5906         }
5907
5908         /* Add old plane bounding-box if plane is moved or resized */
5909         if (bb_changed) {
5910                 dirty_rects[i].x = old_plane_state->crtc_x;
5911                 dirty_rects[i].y = old_plane_state->crtc_y;
5912                 dirty_rects[i].width = old_plane_state->crtc_w;
5913                 dirty_rects[i].height = old_plane_state->crtc_h;
5914                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5915                                 old_plane_state->plane->base.id,
5916                                 dirty_rects[i].x, dirty_rects[i].y,
5917                                 dirty_rects[i].width, dirty_rects[i].height);
5918                 i += 1;
5919         }
5920
5921         flip_addrs->dirty_rect_count = i;
5922 }
5923
5924 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5925                                            const struct dm_connector_state *dm_state,
5926                                            struct dc_stream_state *stream)
5927 {
5928         enum amdgpu_rmx_type rmx_type;
5929
5930         struct rect src = { 0 }; /* viewport in composition space */
5931         struct rect dst = { 0 }; /* stream addressable area */
5932
5933         /* No mode, nothing to be done */
5934         if (!mode)
5935                 return;
5936
5937         /* Full screen scaling by default */
5938         src.width = mode->hdisplay;
5939         src.height = mode->vdisplay;
5940         dst.width = stream->timing.h_addressable;
5941         dst.height = stream->timing.v_addressable;
5942
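        /*
         * Worked example for the RMX_ASPECT path below: a 1280x720 source on
         * a 1920x1200 stream compares 1280 * 1200 against 720 * 1920; the
         * width needs less upscaling, so dst becomes 1920x1080 and is
         * centered with 60 lines of border top and bottom.
         */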
5943         if (dm_state) {
5944                 rmx_type = dm_state->scaling;
5945                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5946                         if (src.width * dst.height <
5947                                         src.height * dst.width) {
5948                                 /* height needs less upscaling/more downscaling */
5949                                 dst.width = src.width *
5950                                                 dst.height / src.height;
5951                         } else {
5952                                 /* width needs less upscaling/more downscaling */
5953                                 dst.height = src.height *
5954                                                 dst.width / src.width;
5955                         }
5956                 } else if (rmx_type == RMX_CENTER) {
5957                         dst = src;
5958                 }
5959
5960                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5961                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5962
5963                 if (dm_state->underscan_enable) {
5964                         dst.x += dm_state->underscan_hborder / 2;
5965                         dst.y += dm_state->underscan_vborder / 2;
5966                         dst.width -= dm_state->underscan_hborder;
5967                         dst.height -= dm_state->underscan_vborder;
5968                 }
5969         }
5970
5971         stream->src = src;
5972         stream->dst = dst;
5973
5974         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5975                       dst.x, dst.y, dst.width, dst.height);
5977 }
5978
5979 static enum dc_color_depth
5980 convert_color_depth_from_display_info(const struct drm_connector *connector,
5981                                       bool is_y420, int requested_bpc)
5982 {
5983         uint8_t bpc;
5984
5985         if (is_y420) {
5986                 bpc = 8;
5987
5988                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5989                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5990                         bpc = 16;
5991                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5992                         bpc = 12;
5993                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5994                         bpc = 10;
5995         } else {
5996                 bpc = (uint8_t)connector->display_info.bpc;
5997                 /* Assume 8 bpc by default if no bpc is specified. */
5998                 bpc = bpc ? bpc : 8;
5999         }
6000
6001         if (requested_bpc > 0) {
6002                 /*
6003                  * Cap display bpc based on the user requested value.
6004                  *
6005                  * The value of state->max_bpc may not be correctly updated,
6006                  * depending on when the connector gets added to the state
6007                  * or if this was called outside of atomic check, so it
6008                  * can't be used directly.
6009                  */
6010                 bpc = min_t(u8, bpc, requested_bpc);
6011
6012                 /* Round down to the nearest even number. */
6013                 bpc = bpc - (bpc & 1);
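                /* e.g. a 12 bpc panel with requested_bpc == 11 ends up at 10 bpc */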
6014         }
6015
6016         switch (bpc) {
6017         case 0:
6018                 /*
6019                  * Temporary workaround: DRM doesn't parse color depth for
6020                  * EDID revisions before 1.4.
6021                  * TODO: Fix edid parsing
6022                  */
6023                 return COLOR_DEPTH_888;
6024         case 6:
6025                 return COLOR_DEPTH_666;
6026         case 8:
6027                 return COLOR_DEPTH_888;
6028         case 10:
6029                 return COLOR_DEPTH_101010;
6030         case 12:
6031                 return COLOR_DEPTH_121212;
6032         case 14:
6033                 return COLOR_DEPTH_141414;
6034         case 16:
6035                 return COLOR_DEPTH_161616;
6036         default:
6037                 return COLOR_DEPTH_UNDEFINED;
6038         }
6039 }
6040
6041 static enum dc_aspect_ratio
6042 get_aspect_ratio(const struct drm_display_mode *mode_in)
6043 {
6044         /* 1-1 mapping, since both enums follow the HDMI spec. */
6045         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
6046 }
6047
6048 static enum dc_color_space
6049 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
6050 {
6051         enum dc_color_space color_space = COLOR_SPACE_SRGB;
6052
6053         switch (dc_crtc_timing->pixel_encoding) {
6054         case PIXEL_ENCODING_YCBCR422:
6055         case PIXEL_ENCODING_YCBCR444:
6056         case PIXEL_ENCODING_YCBCR420:
6057         {
6058                 /*
6059                  * 27.03 MHz (27030 kHz) is the separation point between
6060                  * HDTV and SDTV according to the HDMI spec; use YCbCr709
6061                  * above it and YCbCr601 below it.
6062                  */
6063                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
6064                         if (dc_crtc_timing->flags.Y_ONLY)
6065                                 color_space =
6066                                         COLOR_SPACE_YCBCR709_LIMITED;
6067                         else
6068                                 color_space = COLOR_SPACE_YCBCR709;
6069                 } else {
6070                         if (dc_crtc_timing->flags.Y_ONLY)
6071                                 color_space =
6072                                         COLOR_SPACE_YCBCR601_LIMITED;
6073                         else
6074                                 color_space = COLOR_SPACE_YCBCR601;
6075                 }
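                /*
                 * e.g. 480p (27.00 MHz) falls below the threshold and is
                 * assigned YCbCr601, while 720p (74.25 MHz) gets YCbCr709.
                 */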
6076
6077         }
6078         break;
6079         case PIXEL_ENCODING_RGB:
6080                 color_space = COLOR_SPACE_SRGB;
6081                 break;
6082
6083         default:
6084                 WARN_ON(1);
6085                 break;
6086         }
6087
6088         return color_space;
6089 }
6090
6091 static bool adjust_colour_depth_from_display_info(
6092         struct dc_crtc_timing *timing_out,
6093         const struct drm_display_info *info)
6094 {
6095         enum dc_color_depth depth = timing_out->display_color_depth;
6096         int normalized_clk;
6097         do {
6098                 normalized_clk = timing_out->pix_clk_100hz / 10;
6099                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
6100                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
6101                         normalized_clk /= 2;
6102                 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
6103                 switch (depth) {
6104                 case COLOR_DEPTH_888:
6105                         break;
6106                 case COLOR_DEPTH_101010:
6107                         normalized_clk = (normalized_clk * 30) / 24;
6108                         break;
6109                 case COLOR_DEPTH_121212:
6110                         normalized_clk = (normalized_clk * 36) / 24;
6111                         break;
6112                 case COLOR_DEPTH_161616:
6113                         normalized_clk = (normalized_clk * 48) / 24;
6114                         break;
6115                 default:
6116                         /* The above depths are the only ones valid for HDMI. */
6117                         return false;
6118                 }
6119                 if (normalized_clk <= info->max_tmds_clock) {
6120                         timing_out->display_color_depth = depth;
6121                         return true;
6122                 }
6123         } while (--depth > COLOR_DEPTH_666);
6124         return false;
6125 }
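/*
 * Worked example with illustrative numbers: 3840x2160@60 YCbCr 4:2:0 at
 * 12 bpc against a 340000 kHz max TMDS clock. normalized_clk starts at
 * 594000 / 2 = 297000 kHz; 297000 * 36 / 24 = 445500 exceeds the limit,
 * as does 10 bpc (371250), so the loop settles at 8 bpc (297000).
 */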
6126
6127 static void fill_stream_properties_from_drm_display_mode(
6128         struct dc_stream_state *stream,
6129         const struct drm_display_mode *mode_in,
6130         const struct drm_connector *connector,
6131         const struct drm_connector_state *connector_state,
6132         const struct dc_stream_state *old_stream,
6133         int requested_bpc)
6134 {
6135         struct dc_crtc_timing *timing_out = &stream->timing;
6136         const struct drm_display_info *info = &connector->display_info;
6137         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6138         struct hdmi_vendor_infoframe hv_frame;
6139         struct hdmi_avi_infoframe avi_frame;
6140
6141         memset(&hv_frame, 0, sizeof(hv_frame));
6142         memset(&avi_frame, 0, sizeof(avi_frame));
6143
6144         timing_out->h_border_left = 0;
6145         timing_out->h_border_right = 0;
6146         timing_out->v_border_top = 0;
6147         timing_out->v_border_bottom = 0;
6148         /* TODO: un-hardcode */
6149         if (drm_mode_is_420_only(info, mode_in)
6150                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6151                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6152         else if (drm_mode_is_420_also(info, mode_in)
6153                         && aconnector->force_yuv420_output)
6154                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6155         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
6156                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6157                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6158         else
6159                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6160
6161         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6162         timing_out->display_color_depth = convert_color_depth_from_display_info(
6163                 connector,
6164                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6165                 requested_bpc);
6166         timing_out->scan_type = SCANNING_TYPE_NODATA;
6167         timing_out->hdmi_vic = 0;
6168
6169         if (old_stream) {
6170                 timing_out->vic = old_stream->timing.vic;
6171                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6172                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6173         } else {
6174                 timing_out->vic = drm_match_cea_mode(mode_in);
6175                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6176                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6177                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6178                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6179         }
6180
6181         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6182                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6183                 timing_out->vic = avi_frame.video_code;
6184                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6185                 timing_out->hdmi_vic = hv_frame.vic;
6186         }
6187
6188         if (is_freesync_video_mode(mode_in, aconnector)) {
6189                 timing_out->h_addressable = mode_in->hdisplay;
6190                 timing_out->h_total = mode_in->htotal;
6191                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6192                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6193                 timing_out->v_total = mode_in->vtotal;
6194                 timing_out->v_addressable = mode_in->vdisplay;
6195                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6196                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6197                 timing_out->pix_clk_100hz = mode_in->clock * 10;
6198         } else {
6199                 timing_out->h_addressable = mode_in->crtc_hdisplay;
6200                 timing_out->h_total = mode_in->crtc_htotal;
6201                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6202                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6203                 timing_out->v_total = mode_in->crtc_vtotal;
6204                 timing_out->v_addressable = mode_in->crtc_vdisplay;
6205                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6206                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6207                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6208         }
6209
6210         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6211
6212         stream->output_color_space = get_output_color_space(timing_out);
6213
6214         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6215         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6216         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6217                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6218                     drm_mode_is_420_also(info, mode_in) &&
6219                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6220                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6221                         adjust_colour_depth_from_display_info(timing_out, info);
6222                 }
6223         }
6224 }
6225
6226 static void fill_audio_info(struct audio_info *audio_info,
6227                             const struct drm_connector *drm_connector,
6228                             const struct dc_sink *dc_sink)
6229 {
6230         int i = 0;
6231         int cea_revision = 0;
6232         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6233
6234         audio_info->manufacture_id = edid_caps->manufacturer_id;
6235         audio_info->product_id = edid_caps->product_id;
6236
6237         cea_revision = drm_connector->display_info.cea_rev;
6238
6239         strscpy(audio_info->display_name,
6240                 edid_caps->display_name,
6241                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6242
6243         if (cea_revision >= 3) {
6244                 audio_info->mode_count = edid_caps->audio_mode_count;
6245
6246                 for (i = 0; i < audio_info->mode_count; ++i) {
6247                         audio_info->modes[i].format_code =
6248                                         (enum audio_format_code)
6249                                         (edid_caps->audio_modes[i].format_code);
6250                         audio_info->modes[i].channel_count =
6251                                         edid_caps->audio_modes[i].channel_count;
6252                         audio_info->modes[i].sample_rates.all =
6253                                         edid_caps->audio_modes[i].sample_rate;
6254                         audio_info->modes[i].sample_size =
6255                                         edid_caps->audio_modes[i].sample_size;
6256                 }
6257         }
6258
6259         audio_info->flags.all = edid_caps->speaker_flags;
6260
6261         /* TODO: We only check the progressive mode; check the interlaced mode too */
6262         if (drm_connector->latency_present[0]) {
6263                 audio_info->video_latency = drm_connector->video_latency[0];
6264                 audio_info->audio_latency = drm_connector->audio_latency[0];
6265         }
6266
6267         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6268
6269 }
6270
6271 static void
6272 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6273                                       struct drm_display_mode *dst_mode)
6274 {
6275         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6276         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6277         dst_mode->crtc_clock = src_mode->crtc_clock;
6278         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6279         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6280         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6281         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6282         dst_mode->crtc_htotal = src_mode->crtc_htotal;
6283         dst_mode->crtc_hskew = src_mode->crtc_hskew;
6284         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6285         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6286         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6287         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6288         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6289 }
6290
6291 static void
6292 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6293                                         const struct drm_display_mode *native_mode,
6294                                         bool scale_enabled)
6295 {
6296         if (scale_enabled) {
6297                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6298         } else if (native_mode->clock == drm_mode->clock &&
6299                         native_mode->htotal == drm_mode->htotal &&
6300                         native_mode->vtotal == drm_mode->vtotal) {
6301                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6302         } else {
6303                 /* no scaling and no amdgpu-inserted mode, no need to patch */
6304         }
6305 }
6306
6307 static struct dc_sink *
6308 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6309 {
6310         struct dc_sink_init_data sink_init_data = { 0 };
6311         struct dc_sink *sink = NULL;
6312         sink_init_data.link = aconnector->dc_link;
6313         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6314
6315         sink = dc_sink_create(&sink_init_data);
6316         if (!sink) {
6317                 DRM_ERROR("Failed to create sink!\n");
6318                 return NULL;
6319         }
6320         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6321
6322         return sink;
6323 }
6324
6325 static void set_multisync_trigger_params(
6326                 struct dc_stream_state *stream)
6327 {
6328         struct dc_stream_state *master = NULL;
6329
6330         if (stream->triggered_crtc_reset.enabled) {
6331                 master = stream->triggered_crtc_reset.event_source;
6332                 stream->triggered_crtc_reset.event =
6333                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6334                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6335                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6336         }
6337 }
6338
6339 static void set_master_stream(struct dc_stream_state *stream_set[],
6340                               int stream_count)
6341 {
6342         int j, highest_rfr = 0, master_stream = 0;
6343
6344         for (j = 0;  j < stream_count; j++) {
6345                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6346                         int refresh_rate = 0;
6347
6348                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6349                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
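                        /* e.g. 148.5 MHz over 2200 x 1125 total pixels yields 60 Hz */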
6350                         if (refresh_rate > highest_rfr) {
6351                                 highest_rfr = refresh_rate;
6352                                 master_stream = j;
6353                         }
6354                 }
6355         }
6356         for (j = 0;  j < stream_count; j++) {
6357                 if (stream_set[j])
6358                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6359         }
6360 }
6361
6362 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6363 {
6364         int i = 0;
6365         struct dc_stream_state *stream;
6366
6367         if (context->stream_count < 2)
6368                 return;
6369         for (i = 0; i < context->stream_count ; i++) {
6370                 if (!context->streams[i])
6371                         continue;
6372                 /*
6373                  * TODO: add a function to read AMD VSDB bits and set
6374                  * crtc_sync_master.multi_sync_enabled flag
6375                  * For now it's set to false
6376                  */
6377         }
6378
6379         set_master_stream(context->streams, context->stream_count);
6380
6381         for (i = 0; i < context->stream_count ; i++) {
6382                 stream = context->streams[i];
6383
6384                 if (!stream)
6385                         continue;
6386
6387                 set_multisync_trigger_params(stream);
6388         }
6389 }
6390
6391 #if defined(CONFIG_DRM_AMD_DC_DCN)
6392 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6393                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6394                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6395 {
6396         stream->timing.flags.DSC = 0;
6397         dsc_caps->is_dsc_supported = false;
6398
6399         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6400                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6401                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6402                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6403                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6404                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6405                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6406                                 dsc_caps);
6407         }
6408 }
6409
6410 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6411                                     struct dc_sink *sink, struct dc_stream_state *stream,
6412                                     struct dsc_dec_dpcd_caps *dsc_caps,
6413                                     uint32_t max_dsc_target_bpp_limit_override)
6414 {
6415         const struct dc_link_settings *verified_link_cap = NULL;
6416         uint32_t link_bw_in_kbps;
6417         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6418         struct dc *dc = sink->ctx->dc;
6419         struct dc_dsc_bw_range bw_range = {0};
6420         struct dc_dsc_config dsc_cfg = {0};
6421
6422         verified_link_cap = dc_link_get_link_cap(stream->link);
6423         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6424         edp_min_bpp_x16 = 8 * 16;
6425         edp_max_bpp_x16 = 8 * 16;
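        /* The *_bpp_x16 values carry bits-per-pixel in 1/16th units: 8 * 16 == 8.0 bpp. */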
6426
6427         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6428                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6429
6430         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6431                 edp_min_bpp_x16 = edp_max_bpp_x16;
6432
6433         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6434                                 dc->debug.dsc_min_slice_height_override,
6435                                 edp_min_bpp_x16, edp_max_bpp_x16,
6436                                 dsc_caps,
6437                                 &stream->timing,
6438                                 &bw_range)) {
6439
6440                 if (bw_range.max_kbps < link_bw_in_kbps) {
6441                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6442                                         dsc_caps,
6443                                         dc->debug.dsc_min_slice_height_override,
6444                                         max_dsc_target_bpp_limit_override,
6445                                         0,
6446                                         &stream->timing,
6447                                         &dsc_cfg)) {
6448                                 stream->timing.dsc_cfg = dsc_cfg;
6449                                 stream->timing.flags.DSC = 1;
6450                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6451                         }
6452                         return;
6453                 }
6454         }
6455
6456         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6457                                 dsc_caps,
6458                                 dc->debug.dsc_min_slice_height_override,
6459                                 max_dsc_target_bpp_limit_override,
6460                                 link_bw_in_kbps,
6461                                 &stream->timing,
6462                                 &dsc_cfg)) {
6463                 stream->timing.dsc_cfg = dsc_cfg;
6464                 stream->timing.flags.DSC = 1;
6465         }
6466 }
6467
6468 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6469                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6470                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6471 {
6472         struct drm_connector *drm_connector = &aconnector->base;
6473         uint32_t link_bandwidth_kbps;
6474         uint32_t max_dsc_target_bpp_limit_override = 0;
6475         struct dc *dc = sink->ctx->dc;
6476         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6477         uint32_t dsc_max_supported_bw_in_kbps;
6478
6479         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6480                                                         dc_link_get_link_cap(aconnector->dc_link));
6481
6482         if (stream->link && stream->link->local_sink)
6483                 max_dsc_target_bpp_limit_override =
6484                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6485
6486         /* Set DSC policy according to dsc_clock_en */
6487         dc_dsc_policy_set_enable_dsc_when_not_needed(
6488                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6489
6490         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6491             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6492
6493                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6494
6495         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6496                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6497                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6498                                                 dsc_caps,
6499                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6500                                                 max_dsc_target_bpp_limit_override,
6501                                                 link_bandwidth_kbps,
6502                                                 &stream->timing,
6503                                                 &stream->timing.dsc_cfg)) {
6504                                 stream->timing.flags.DSC = 1;
6505                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6506                                                                  __func__, drm_connector->name);
6507                         }
6508                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6509                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6510                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6511                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6512
6513                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6514                                         max_supported_bw_in_kbps > 0 &&
6515                                         dsc_max_supported_bw_in_kbps > 0)
6516                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6517                                                 dsc_caps,
6518                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6519                                                 max_dsc_target_bpp_limit_override,
6520                                                 dsc_max_supported_bw_in_kbps,
6521                                                 &stream->timing,
6522                                                 &stream->timing.dsc_cfg)) {
6523                                         stream->timing.flags.DSC = 1;
6524                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6525                                                                          __func__, drm_connector->name);
6526                                 }
6527                 }
6528         }
6529
6530         /* Overwrite the stream flag if DSC is enabled through debugfs */
6531         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6532                 stream->timing.flags.DSC = 1;
6533
6534         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6535                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6536
6537         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6538                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6539
6540         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6541                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6542 }
6543 #endif /* CONFIG_DRM_AMD_DC_DCN */
6544
6545 /**
6546  * DOC: FreeSync Video
6547  *
6548  * When a userspace application wants to play a video, the content follows a
6549  * standard format definition that usually specifies the FPS for that format.
6550  * The list below illustrates some video formats and their expected FPS,
6551  * respectively:
6552  *
6553  * - TV/NTSC (23.976 FPS)
6554  * - Cinema (24 FPS)
6555  * - TV/PAL (25 FPS)
6556  * - TV/NTSC (29.97 FPS)
6557  * - TV/NTSC (30 FPS)
6558  * - Cinema HFR (48 FPS)
6559  * - TV/PAL (50 FPS)
6560  * - Commonly used (60 FPS)
6561  * - Multiples of 24 (48,72,96,120 FPS)
6562  *
6563  * The list of standard video formats is not huge, so these modes can be
6564  * added to the connector mode list beforehand. With that, userspace can
6565  * leverage FreeSync to extend the front porch in order to attain the target
6566  * refresh rate. Such a switch will happen seamlessly, without screen
6567  * blanking or reprogramming of the output in any other way. If userspace
6568  * requests a modeset compatible with FreeSync modes that differ only in the
6569  * refresh rate, DC will skip the full update and avoid any blink during the
6570  * transition. For example, the video player can change the modesetting from
6571  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6572  * causing any display blink. The same concept applies to any other mode
6573  * setting change.
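 *
 * As a concrete example, a CEA 1920x1080@60 mode with a 148.5 MHz pixel
 * clock uses htotal = 2200 and vtotal = 1125. Keeping the clock and the
 * horizontal timing fixed while stretching only the vertical front porch
 * to vtotal = 1350 yields exactly 50 Hz, and vtotal = 2250 yields exactly
 * 30 Hz, since refresh = clock / (htotal * vtotal). Such stretched
 * variants are the modes matched by is_freesync_video_mode() below.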
6574  */
6575 static struct drm_display_mode *
6576 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6577                           bool use_probed_modes)
6578 {
6579         struct drm_display_mode *m, *m_pref = NULL;
6580         u16 current_refresh, highest_refresh;
6581         struct list_head *list_head = use_probed_modes ?
6582                                                     &aconnector->base.probed_modes :
6583                                                     &aconnector->base.modes;
6584
6585         if (aconnector->freesync_vid_base.clock != 0)
6586                 return &aconnector->freesync_vid_base;
6587
6588         /* Find the preferred mode */
6589         list_for_each_entry(m, list_head, head) {
6590                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6591                         m_pref = m;
6592                         break;
6593                 }
6594         }
6595
6596         if (!m_pref) {
6597                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6598                 m_pref = list_first_entry_or_null(
6599                         &aconnector->base.modes, struct drm_display_mode, head);
6600                 if (!m_pref) {
6601                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6602                         return NULL;
6603                 }
6604         }
6605
6606         highest_refresh = drm_mode_vrefresh(m_pref);
6607
6608         /*
6609          * Find the mode with highest refresh rate with same resolution.
6610          * For some monitors, preferred mode is not the mode with highest
6611          * supported refresh rate.
6612          */
6613         list_for_each_entry(m, list_head, head) {
6614                 current_refresh  = drm_mode_vrefresh(m);
6615
6616                 if (m->hdisplay == m_pref->hdisplay &&
6617                     m->vdisplay == m_pref->vdisplay &&
6618                     highest_refresh < current_refresh) {
6619                         highest_refresh = current_refresh;
6620                         m_pref = m;
6621                 }
6622         }
6623
6624         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6625         return m_pref;
6626 }
6627
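/*
 * A mode qualifies as a FreeSync video mode when it matches the cached
 * base mode in pixel clock, active size and all horizontal timings, and
 * its vsync position is shifted by exactly the same amount as its vtotal,
 * i.e. the two modes differ only in the length of the vertical front
 * porch.
 */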
6628 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6629                                    struct amdgpu_dm_connector *aconnector)
6630 {
6631         struct drm_display_mode *high_mode;
6632         int timing_diff;
6633
6634         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6635         if (!high_mode || !mode)
6636                 return false;
6637
6638         timing_diff = high_mode->vtotal - mode->vtotal;
6639
6640         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6641             high_mode->hdisplay != mode->hdisplay ||
6642             high_mode->vdisplay != mode->vdisplay ||
6643             high_mode->hsync_start != mode->hsync_start ||
6644             high_mode->hsync_end != mode->hsync_end ||
6645             high_mode->htotal != mode->htotal ||
6646             high_mode->hskew != mode->hskew ||
6647             high_mode->vscan != mode->vscan ||
6648             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6649             high_mode->vsync_end - mode->vsync_end != timing_diff)
6650                 return false;
6651         else
6652                 return true;
6653 }
6654
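/*
 * Build a dc_stream_state for the given connector and mode: pick the
 * preferred (native) mode, retime to the FreeSync video base mode where
 * applicable, fill the stream and audio properties, and apply the SST
 * DSC and PSR/VSC SDP policies. Returns NULL on failure; the caller
 * owns the reference on the returned stream.
 */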
6655 static struct dc_stream_state *
6656 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6657                        const struct drm_display_mode *drm_mode,
6658                        const struct dm_connector_state *dm_state,
6659                        const struct dc_stream_state *old_stream,
6660                        int requested_bpc)
6661 {
6662         struct drm_display_mode *preferred_mode = NULL;
6663         struct drm_connector *drm_connector;
6664         const struct drm_connector_state *con_state =
6665                 dm_state ? &dm_state->base : NULL;
6666         struct dc_stream_state *stream = NULL;
6667         struct drm_display_mode mode = *drm_mode;
6668         struct drm_display_mode saved_mode;
6669         struct drm_display_mode *freesync_mode = NULL;
6670         bool native_mode_found = false;
6671         bool recalculate_timing = false;
6672         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6673         int mode_refresh;
6674         int preferred_refresh = 0;
6675 #if defined(CONFIG_DRM_AMD_DC_DCN)
6676         struct dsc_dec_dpcd_caps dsc_caps;
6677 #endif
6678         struct dc_sink *sink = NULL;
6679
6680         memset(&saved_mode, 0, sizeof(saved_mode));
6681
6682         if (aconnector == NULL) {
6683                 DRM_ERROR("aconnector is NULL!\n");
6684                 return stream;
6685         }
6686
6687         drm_connector = &aconnector->base;
6688
6689         if (!aconnector->dc_sink) {
6690                 sink = create_fake_sink(aconnector);
6691                 if (!sink)
6692                         return stream;
6693         } else {
6694                 sink = aconnector->dc_sink;
6695                 dc_sink_retain(sink);
6696         }
6697
6698         stream = dc_create_stream_for_sink(sink);
6699
6700         if (stream == NULL) {
6701                 DRM_ERROR("Failed to create stream for sink!\n");
6702                 goto finish;
6703         }
6704
6705         stream->dm_stream_context = aconnector;
6706
6707         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6708                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6709
6710         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6711                 /* Search for preferred mode */
6712                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6713                         native_mode_found = true;
6714                         break;
6715                 }
6716         }
6717         if (!native_mode_found)
6718                 preferred_mode = list_first_entry_or_null(
6719                                 &aconnector->base.modes,
6720                                 struct drm_display_mode,
6721                                 head);
6722
6723         mode_refresh = drm_mode_vrefresh(&mode);
6724
6725         if (preferred_mode == NULL) {
6726                 /*
6727                  * This may not be an error: the use case is when we have no
6728                  * usermode calls to reset and set the mode upon hotplug. In this
6729                  * case, we call set mode ourselves to restore the previous mode
6730                  * and the mode list may not be filled in time.
6731                  */
6732                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6733         } else {
6734                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6735                 if (recalculate_timing) {
6736                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6737                         drm_mode_copy(&saved_mode, &mode);
6738                         drm_mode_copy(&mode, freesync_mode);
6739                 } else {
6740                         decide_crtc_timing_for_drm_display_mode(
6741                                 &mode, preferred_mode, scale);
6742
6743                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6744                 }
6745         }
6746
6747         if (recalculate_timing)
6748                 drm_mode_set_crtcinfo(&saved_mode, 0);
6749         else if (!dm_state)
6750                 drm_mode_set_crtcinfo(&mode, 0);
6751
6752         /*
6753          * If scaling is enabled and the refresh rate didn't change,
6754          * we copy the VIC and polarities of the old timings.
6755          */
6756         if (!scale || mode_refresh != preferred_refresh)
6757                 fill_stream_properties_from_drm_display_mode(
6758                         stream, &mode, &aconnector->base, con_state, NULL,
6759                         requested_bpc);
6760         else
6761                 fill_stream_properties_from_drm_display_mode(
6762                         stream, &mode, &aconnector->base, con_state, old_stream,
6763                         requested_bpc);
6764
6765 #if defined(CONFIG_DRM_AMD_DC_DCN)
6766         /* SST DSC determination policy */
6767         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6768         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6769                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6770 #endif
6771
6772         update_stream_scaling_settings(&mode, dm_state, stream);
6773
6774         fill_audio_info(
6775                 &stream->audio_info,
6776                 drm_connector,
6777                 sink);
6778
6779         update_stream_signal(stream, sink);
6780
6781         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6782                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6783
6784         if (stream->link->psr_settings.psr_feature_enabled) {
6785                 //
6786                 // Decide whether the stream supports the VSC SDP colorimetry
6787                 // capability before building the VSC info packet.
6788                 //
6789                 stream->use_vsc_sdp_for_colorimetry = false;
6790                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6791                         stream->use_vsc_sdp_for_colorimetry =
6792                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6793                 } else {
6794                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6795                                 stream->use_vsc_sdp_for_colorimetry = true;
6796                 }
6797                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6798                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6799
6800         }
6801 finish:
6802         dc_sink_release(sink);
6803
6804         return stream;
6805 }
6806
6807 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6808 {
6809         drm_crtc_cleanup(crtc);
6810         kfree(crtc);
6811 }
6812
6813 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6814                                   struct drm_crtc_state *state)
6815 {
6816         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6817
6818         /* TODO Destroy dc_stream objects when the stream object is flattened */
6819         if (cur->stream)
6820                 dc_stream_release(cur->stream);
6821
6822
6823         __drm_atomic_helper_crtc_destroy_state(state);
6824
6825
6826         kfree(state);
6827 }
6828
6829 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6830 {
6831         struct dm_crtc_state *state;
6832
6833         if (crtc->state)
6834                 dm_crtc_destroy_state(crtc, crtc->state);
6835
6836         state = kzalloc(sizeof(*state), GFP_KERNEL);
6837         if (WARN_ON(!state))
6838                 return;
6839
6840         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6841 }
6842
6843 static struct drm_crtc_state *
6844 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6845 {
6846         struct dm_crtc_state *state, *cur;
6847
6848         if (WARN_ON(!crtc->state))
6849                 return NULL;
6850
6851         cur = to_dm_crtc_state(crtc->state);
6852
6853         state = kzalloc(sizeof(*state), GFP_KERNEL);
6854         if (!state)
6855                 return NULL;
6856
6857         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6858
6859         if (cur->stream) {
6860                 state->stream = cur->stream;
6861                 dc_stream_retain(state->stream);
6862         }
6863
6864         state->active_planes = cur->active_planes;
6865         state->vrr_infopacket = cur->vrr_infopacket;
6866         state->abm_level = cur->abm_level;
6867         state->vrr_supported = cur->vrr_supported;
6868         state->freesync_config = cur->freesync_config;
6869         state->cm_has_degamma = cur->cm_has_degamma;
6870         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6871         state->mpo_requested = cur->mpo_requested;
6872         /* TODO Duplicate dc_stream after the stream object is flattened */
6873
6874         return &state->base;
6875 }
6876
6877 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6878 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6879 {
6880         crtc_debugfs_init(crtc);
6881
6882         return 0;
6883 }
6884 #endif
6885
6886 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6887 {
6888         enum dc_irq_source irq_source;
6889         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6890         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6891         int rc;
6892
6893         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6894
6895         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6896
6897         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6898                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6899         return rc;
6900 }
6901
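/*
 * VUPDATE interrupts are only needed while VBLANK interrupts are on and
 * VRR is active, so the two are toggled together here. The heavier
 * follow-up work is deferred to the vblank control workqueue (note the
 * GFP_ATOMIC allocation), which keeps this path safe to call from
 * atomic context.
 */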
6902 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6903 {
6904         enum dc_irq_source irq_source;
6905         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6906         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6907         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6908         struct amdgpu_display_manager *dm = &adev->dm;
6909         struct vblank_control_work *work;
6910         int rc = 0;
6911
6912         if (enable) {
6913                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6914                 if (amdgpu_dm_vrr_active(acrtc_state))
6915                         rc = dm_set_vupdate_irq(crtc, true);
6916         } else {
6917                 /* vblank irq off -> vupdate irq off */
6918                 rc = dm_set_vupdate_irq(crtc, false);
6919         }
6920
6921         if (rc)
6922                 return rc;
6923
6924         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6925
6926         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6927                 return -EBUSY;
6928
6929         if (amdgpu_in_reset(adev))
6930                 return 0;
6931
6932         if (dm->vblank_control_workqueue) {
6933                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6934                 if (!work)
6935                         return -ENOMEM;
6936
6937                 INIT_WORK(&work->work, vblank_control_worker);
6938                 work->dm = dm;
6939                 work->acrtc = acrtc;
6940                 work->enable = enable;
6941
6942                 if (acrtc_state->stream) {
6943                         dc_stream_retain(acrtc_state->stream);
6944                         work->stream = acrtc_state->stream;
6945                 }
6946
6947                 queue_work(dm->vblank_control_workqueue, &work->work);
6948         }
6949
6950         return 0;
6951 }
6952
6953 static int dm_enable_vblank(struct drm_crtc *crtc)
6954 {
6955         return dm_set_vblank(crtc, true);
6956 }
6957
6958 static void dm_disable_vblank(struct drm_crtc *crtc)
6959 {
6960         dm_set_vblank(crtc, false);
6961 }
6962
6963 /* Implemented only the options currently available for the driver */
6964 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6965         .reset = dm_crtc_reset_state,
6966         .destroy = amdgpu_dm_crtc_destroy,
6967         .set_config = drm_atomic_helper_set_config,
6968         .page_flip = drm_atomic_helper_page_flip,
6969         .atomic_duplicate_state = dm_crtc_duplicate_state,
6970         .atomic_destroy_state = dm_crtc_destroy_state,
6971         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6972         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6973         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6974         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6975         .enable_vblank = dm_enable_vblank,
6976         .disable_vblank = dm_disable_vblank,
6977         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6978 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6979         .late_register = amdgpu_dm_crtc_late_register,
6980 #endif
6981 };
6982
6983 static enum drm_connector_status
6984 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6985 {
6986         bool connected;
6987         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6988
6989         /*
6990          * Notes:
6991          * 1. This interface is NOT called in context of HPD irq.
6992          * 2. This interface *is* called in the context of a user-mode ioctl,
6993          * which makes it a bad place for *any* MST-related activity.
6994          */
6995
6996         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6997             !aconnector->fake_enable)
6998                 connected = (aconnector->dc_sink != NULL);
6999         else
7000                 connected = (aconnector->base.force == DRM_FORCE_ON);
7001
7002         update_subconnector_property(aconnector);
7003
7004         return (connected ? connector_status_connected :
7005                         connector_status_disconnected);
7006 }
7007
7008 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
7009                                             struct drm_connector_state *connector_state,
7010                                             struct drm_property *property,
7011                                             uint64_t val)
7012 {
7013         struct drm_device *dev = connector->dev;
7014         struct amdgpu_device *adev = drm_to_adev(dev);
7015         struct dm_connector_state *dm_old_state =
7016                 to_dm_connector_state(connector->state);
7017         struct dm_connector_state *dm_new_state =
7018                 to_dm_connector_state(connector_state);
7019
7020         int ret = -EINVAL;
7021
7022         if (property == dev->mode_config.scaling_mode_property) {
7023                 enum amdgpu_rmx_type rmx_type;
7024
7025                 switch (val) {
7026                 case DRM_MODE_SCALE_CENTER:
7027                         rmx_type = RMX_CENTER;
7028                         break;
7029                 case DRM_MODE_SCALE_ASPECT:
7030                         rmx_type = RMX_ASPECT;
7031                         break;
7032                 case DRM_MODE_SCALE_FULLSCREEN:
7033                         rmx_type = RMX_FULL;
7034                         break;
7035                 case DRM_MODE_SCALE_NONE:
7036                 default:
7037                         rmx_type = RMX_OFF;
7038                         break;
7039                 }
7040
7041                 if (dm_old_state->scaling == rmx_type)
7042                         return 0;
7043
7044                 dm_new_state->scaling = rmx_type;
7045                 ret = 0;
7046         } else if (property == adev->mode_info.underscan_hborder_property) {
7047                 dm_new_state->underscan_hborder = val;
7048                 ret = 0;
7049         } else if (property == adev->mode_info.underscan_vborder_property) {
7050                 dm_new_state->underscan_vborder = val;
7051                 ret = 0;
7052         } else if (property == adev->mode_info.underscan_property) {
7053                 dm_new_state->underscan_enable = val;
7054                 ret = 0;
7055         } else if (property == adev->mode_info.abm_level_property) {
7056                 dm_new_state->abm_level = val;
7057                 ret = 0;
7058         }
7059
7060         return ret;
7061 }
7062
7063 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
7064                                             const struct drm_connector_state *state,
7065                                             struct drm_property *property,
7066                                             uint64_t *val)
7067 {
7068         struct drm_device *dev = connector->dev;
7069         struct amdgpu_device *adev = drm_to_adev(dev);
7070         struct dm_connector_state *dm_state =
7071                 to_dm_connector_state(state);
7072         int ret = -EINVAL;
7073
7074         if (property == dev->mode_config.scaling_mode_property) {
7075                 switch (dm_state->scaling) {
7076                 case RMX_CENTER:
7077                         *val = DRM_MODE_SCALE_CENTER;
7078                         break;
7079                 case RMX_ASPECT:
7080                         *val = DRM_MODE_SCALE_ASPECT;
7081                         break;
7082                 case RMX_FULL:
7083                         *val = DRM_MODE_SCALE_FULLSCREEN;
7084                         break;
7085                 case RMX_OFF:
7086                 default:
7087                         *val = DRM_MODE_SCALE_NONE;
7088                         break;
7089                 }
7090                 ret = 0;
7091         } else if (property == adev->mode_info.underscan_hborder_property) {
7092                 *val = dm_state->underscan_hborder;
7093                 ret = 0;
7094         } else if (property == adev->mode_info.underscan_vborder_property) {
7095                 *val = dm_state->underscan_vborder;
7096                 ret = 0;
7097         } else if (property == adev->mode_info.underscan_property) {
7098                 *val = dm_state->underscan_enable;
7099                 ret = 0;
7100         } else if (property == adev->mode_info.abm_level_property) {
7101                 *val = dm_state->abm_level;
7102                 ret = 0;
7103         }
7104
7105         return ret;
7106 }
7107
7108 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7109 {
7110         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7111
7112         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7113 }
7114
7115 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
7116 {
7117         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7118         const struct dc_link *link = aconnector->dc_link;
7119         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7120         struct amdgpu_display_manager *dm = &adev->dm;
7121         int i;
7122
7123         /*
7124          * Call only if mst_mgr was initialized before, since it's not done
7125          * for all connector types.
7126          */
7127         if (aconnector->mst_mgr.dev)
7128                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7129
7130         for (i = 0; i < dm->num_of_edps; i++) {
7131                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7132                         backlight_device_unregister(dm->backlight_dev[i]);
7133                         dm->backlight_dev[i] = NULL;
7134                 }
7135         }
7136
7137         if (aconnector->dc_em_sink)
7138                 dc_sink_release(aconnector->dc_em_sink);
7139         aconnector->dc_em_sink = NULL;
7140         if (aconnector->dc_sink)
7141                 dc_sink_release(aconnector->dc_sink);
7142         aconnector->dc_sink = NULL;
7143
7144         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
7145         drm_connector_unregister(connector);
7146         drm_connector_cleanup(connector);
7147         if (aconnector->i2c) {
7148                 i2c_del_adapter(&aconnector->i2c->base);
7149                 kfree(aconnector->i2c);
7150         }
7151         kfree(aconnector->dm_dp_aux.aux.name);
7152
7153         kfree(connector);
7154 }
7155
7156 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7157 {
7158         struct dm_connector_state *state =
7159                 to_dm_connector_state(connector->state);
7160
7161         if (connector->state)
7162                 __drm_atomic_helper_connector_destroy_state(connector->state);
7163
7164         kfree(state);
7165
7166         state = kzalloc(sizeof(*state), GFP_KERNEL);
7167
7168         if (state) {
7169                 state->scaling = RMX_OFF;
7170                 state->underscan_enable = false;
7171                 state->underscan_hborder = 0;
7172                 state->underscan_vborder = 0;
7173                 state->base.max_requested_bpc = 8;
7174                 state->vcpi_slots = 0;
7175                 state->pbn = 0;
7176                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7177                         state->abm_level = amdgpu_dm_abm_level;
7178
7179                 __drm_atomic_helper_connector_reset(connector, &state->base);
7180         }
7181 }
7182
7183 struct drm_connector_state *
7184 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7185 {
7186         struct dm_connector_state *state =
7187                 to_dm_connector_state(connector->state);
7188
7189         struct dm_connector_state *new_state =
7190                         kmemdup(state, sizeof(*state), GFP_KERNEL);
7191
7192         if (!new_state)
7193                 return NULL;
7194
7195         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7196
7197         new_state->freesync_capable = state->freesync_capable;
7198         new_state->abm_level = state->abm_level;
7199         new_state->scaling = state->scaling;
7200         new_state->underscan_enable = state->underscan_enable;
7201         new_state->underscan_hborder = state->underscan_hborder;
7202         new_state->underscan_vborder = state->underscan_vborder;
7203         new_state->vcpi_slots = state->vcpi_slots;
7204         new_state->pbn = state->pbn;
7205         return &new_state->base;
7206 }
7207
7208 static int
7209 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7210 {
7211         struct amdgpu_dm_connector *amdgpu_dm_connector =
7212                 to_amdgpu_dm_connector(connector);
7213         int r;
7214
7215         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7216             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7217                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7218                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7219                 if (r)
7220                         return r;
7221         }
7222
7223 #if defined(CONFIG_DEBUG_FS)
7224         connector_debugfs_init(amdgpu_dm_connector);
7225 #endif
7226
7227         return 0;
7228 }
7229
7230 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7231         .reset = amdgpu_dm_connector_funcs_reset,
7232         .detect = amdgpu_dm_connector_detect,
7233         .fill_modes = drm_helper_probe_single_connector_modes,
7234         .destroy = amdgpu_dm_connector_destroy,
7235         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7236         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7237         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7238         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7239         .late_register = amdgpu_dm_connector_late_register,
7240         .early_unregister = amdgpu_dm_connector_unregister
7241 };
7242
7243 static int get_modes(struct drm_connector *connector)
7244 {
7245         return amdgpu_dm_connector_get_modes(connector);
7246 }
7247
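/*
 * Create an emulated (virtual) sink from the EDID supplied through the
 * DRM EDID override/firmware mechanism, so that a forced-on connector
 * behaves as if that panel were physically attached.
 */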
7248 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7249 {
7250         struct dc_sink_init_data init_params = {
7251                         .link = aconnector->dc_link,
7252                         .sink_signal = SIGNAL_TYPE_VIRTUAL
7253         };
7254         struct edid *edid;
7255
7256         if (!aconnector->base.edid_blob_ptr) {
7257                 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
7258                                 aconnector->base.name);
7259
7260                 aconnector->base.force = DRM_FORCE_OFF;
7261                 aconnector->base.override_edid = false;
7262                 return;
7263         }
7264
7265         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7266
7267         aconnector->edid = edid;
7268
7269         aconnector->dc_em_sink = dc_link_add_remote_sink(
7270                 aconnector->dc_link,
7271                 (uint8_t *)edid,
7272                 (edid->extensions + 1) * EDID_LENGTH,
7273                 &init_params);
7274
7275         if (aconnector->base.force == DRM_FORCE_ON) {
7276                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7277                 aconnector->dc_link->local_sink :
7278                 aconnector->dc_em_sink;
7279                 dc_sink_retain(aconnector->dc_sink);
7280         }
7281 }
7282
7283 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7284 {
7285         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7286
7287         /*
7288          * In case of a headless boot with force-on for a DP managed connector,
7289          * those settings have to be != 0 to get an initial modeset.
7290          */
7291         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7292                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7293                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7294         }
7295
7296
7297         aconnector->base.override_edid = true;
7298         create_eml_sink(aconnector);
7299 }
7300
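/*
 * Create a stream and validate it against DC, progressively relaxing
 * the requested parameters: start from the connector's
 * max_requested_bpc (8 by default), step down by 2 bpc on each failed
 * validation to a floor of 6, and as a last resort retry once with
 * YCbCr 4:2:0 output forced when DC reports DC_FAIL_ENC_VALIDATE.
 */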
7301 struct dc_stream_state *
7302 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7303                                 const struct drm_display_mode *drm_mode,
7304                                 const struct dm_connector_state *dm_state,
7305                                 const struct dc_stream_state *old_stream)
7306 {
7307         struct drm_connector *connector = &aconnector->base;
7308         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7309         struct dc_stream_state *stream;
7310         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7311         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7312         enum dc_status dc_result = DC_OK;
7313
7314         do {
7315                 stream = create_stream_for_sink(aconnector, drm_mode,
7316                                                 dm_state, old_stream,
7317                                                 requested_bpc);
7318                 if (stream == NULL) {
7319                         DRM_ERROR("Failed to create stream for sink!\n");
7320                         break;
7321                 }
7322
7323                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7324                 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
7325                         dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7326
7327                 if (dc_result != DC_OK) {
7328                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7329                                       drm_mode->hdisplay,
7330                                       drm_mode->vdisplay,
7331                                       drm_mode->clock,
7332                                       dc_result,
7333                                       dc_status_to_str(dc_result));
7334
7335                         dc_stream_release(stream);
7336                         stream = NULL;
7337                         requested_bpc -= 2; /* lower bpc to retry validation */
7338                 }
7339
7340         } while (stream == NULL && requested_bpc >= 6);
7341
7342         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7343                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7344
7345                 aconnector->force_yuv420_output = true;
7346                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7347                                                 dm_state, old_stream);
7348                 aconnector->force_yuv420_output = false;
7349         }
7350
7351         return stream;
7352 }
7353
7354 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7355                                    struct drm_display_mode *mode)
7356 {
7357         int result = MODE_ERROR;
7358         struct dc_sink *dc_sink;
7359         /* TODO: Unhardcode stream count */
7360         struct dc_stream_state *stream;
7361         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7362
7363         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7364                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7365                 return result;
7366
7367         /*
7368          * Only run this the first time mode_valid is called, to initialize
7369          * EDID mgmt.
7370          */
7371         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7372                 !aconnector->dc_em_sink)
7373                 handle_edid_mgmt(aconnector);
7374
7375         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7376
7377         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7378                                 aconnector->base.force != DRM_FORCE_ON) {
7379                 DRM_ERROR("dc_sink is NULL!\n");
7380                 goto fail;
7381         }
7382
7383         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7384         if (stream) {
7385                 dc_stream_release(stream);
7386                 result = MODE_OK;
7387         }
7388
7389 fail:
7390         /* TODO: error handling */
7391         return result;
7392 }
7393
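/*
 * Pack the connector's HDR output metadata into a DC info packet. The
 * packed HDMI Dynamic Range and Mastering infoframe is a fixed 30 bytes:
 * a 4 byte header followed by a 26 byte static-metadata payload. For
 * HDMI the native infoframe header (type 0x87) is kept as-is; for DP and
 * eDP the payload is wrapped in an SDP header instead, with the
 * infoframe version and length moving into the first two payload bytes.
 */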
7394 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7395                                 struct dc_info_packet *out)
7396 {
7397         struct hdmi_drm_infoframe frame;
7398         unsigned char buf[30]; /* 26 + 4 */
7399         ssize_t len;
7400         int ret, i;
7401
7402         memset(out, 0, sizeof(*out));
7403
7404         if (!state->hdr_output_metadata)
7405                 return 0;
7406
7407         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7408         if (ret)
7409                 return ret;
7410
7411         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7412         if (len < 0)
7413                 return (int)len;
7414
7415         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7416         if (len != 30)
7417                 return -EINVAL;
7418
7419         /* Prepare the infopacket for DC. */
7420         switch (state->connector->connector_type) {
7421         case DRM_MODE_CONNECTOR_HDMIA:
7422                 out->hb0 = 0x87; /* type */
7423                 out->hb1 = 0x01; /* version */
7424                 out->hb2 = 0x1A; /* length */
7425                 out->sb[0] = buf[3]; /* checksum */
7426                 i = 1;
7427                 break;
7428
7429         case DRM_MODE_CONNECTOR_DisplayPort:
7430         case DRM_MODE_CONNECTOR_eDP:
7431                 out->hb0 = 0x00; /* sdp id, zero */
7432                 out->hb1 = 0x87; /* type */
7433                 out->hb2 = 0x1D; /* payload len - 1 */
7434                 out->hb3 = (0x13 << 2); /* sdp version */
7435                 out->sb[0] = 0x01; /* version */
7436                 out->sb[1] = 0x1A; /* length */
7437                 i = 2;
7438                 break;
7439
7440         default:
7441                 return -EINVAL;
7442         }
7443
7444         memcpy(&out->sb[i], &buf[4], 26);
7445         out->valid = true;
7446
7447         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7448                        sizeof(out->sb), false);
7449
7450         return 0;
7451 }
7452
7453 static int
7454 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7455                                  struct drm_atomic_state *state)
7456 {
7457         struct drm_connector_state *new_con_state =
7458                 drm_atomic_get_new_connector_state(state, conn);
7459         struct drm_connector_state *old_con_state =
7460                 drm_atomic_get_old_connector_state(state, conn);
7461         struct drm_crtc *crtc = new_con_state->crtc;
7462         struct drm_crtc_state *new_crtc_state;
7463         int ret;
7464
7465         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7466
7467         if (!crtc)
7468                 return 0;
7469
7470         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7471                 struct dc_info_packet hdr_infopacket;
7472
7473                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7474                 if (ret)
7475                         return ret;
7476
7477                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7478                 if (IS_ERR(new_crtc_state))
7479                         return PTR_ERR(new_crtc_state);
7480
7481                 /*
7482                  * DC considers the stream backends changed if the
7483                  * static metadata changes. Forcing the modeset also
7484                  * gives a simple way for userspace to switch from
7485                  * 8bpc to 10bpc when setting the metadata to enter
7486                  * or exit HDR.
7487                  *
7488                  * Changing the static metadata after it's been
7489                  * set is permissible, however. So only force a
7490                  * modeset if we're entering or exiting HDR.
7491                  */
7492                 new_crtc_state->mode_changed =
7493                         !old_con_state->hdr_output_metadata ||
7494                         !new_con_state->hdr_output_metadata;
7495         }
7496
7497         return 0;
7498 }
7499
7500 static const struct drm_connector_helper_funcs
7501 amdgpu_dm_connector_helper_funcs = {
7502         /*
7503          * If hotplugging a second, bigger display in FB console mode, bigger
7504          * resolution modes will be filtered by drm_mode_validate_size() and go
7505          * missing after the user starts lightdm. So we need to renew the mode
7506          * list in the get_modes callback, not just return the mode count.
7507          */
7508         .get_modes = get_modes,
7509         .mode_valid = amdgpu_dm_connector_mode_valid,
7510         .atomic_check = amdgpu_dm_connector_atomic_check,
7511 };
7512
7513 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7514 {
7515 }
7516
7517 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7518 {
7519         struct drm_atomic_state *state = new_crtc_state->state;
7520         struct drm_plane *plane;
7521         int num_active = 0;
7522
7523         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7524                 struct drm_plane_state *new_plane_state;
7525
7526                 /* Cursor planes are "fake". */
7527                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7528                         continue;
7529
7530                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7531
7532                 if (!new_plane_state) {
7533                         /*
7534                          * The plane is enabled on the CRTC and hasn't changed
7535                          * state. This means that it previously passed
7536                          * validation and is therefore enabled.
7537                          */
7538                         num_active += 1;
7539                         continue;
7540                 }
7541
7542                 /* We need a framebuffer to be considered enabled. */
7543                 num_active += (new_plane_state->fb != NULL);
7544         }
7545
7546         return num_active;
7547 }
7548
7549 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7550                                          struct drm_crtc_state *new_crtc_state)
7551 {
7552         struct dm_crtc_state *dm_new_crtc_state =
7553                 to_dm_crtc_state(new_crtc_state);
7554
7555         dm_new_crtc_state->active_planes = 0;
7556
7557         if (!dm_new_crtc_state->stream)
7558                 return;
7559
7560         dm_new_crtc_state->active_planes =
7561                 count_crtc_active_planes(new_crtc_state);
7562 }
7563
7564 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7565                                        struct drm_atomic_state *state)
7566 {
7567         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7568                                                                           crtc);
7569         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7570         struct dc *dc = adev->dm.dc;
7571         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7572         int ret = -EINVAL;
7573
7574         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7575
7576         dm_update_crtc_active_planes(crtc, crtc_state);
7577
7578         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7579                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7580                 return ret;
7581         }
7582
7583         /*
7584          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7585          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7586          * planes are disabled, which is not supported by the hardware. And there is legacy
7587          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7588          */
7589         if (crtc_state->enable &&
7590             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7591                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7592                 return -EINVAL;
7593         }
7594
7595         /* In some use cases, like reset, no stream is attached */
7596         if (!dm_crtc_state->stream)
7597                 return 0;
7598
7599         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7600                 return 0;
7601
7602         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7603         return ret;
7604 }
7605
7606 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7607                                       const struct drm_display_mode *mode,
7608                                       struct drm_display_mode *adjusted_mode)
7609 {
7610         return true;
7611 }
7612
7613 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7614         .disable = dm_crtc_helper_disable,
7615         .atomic_check = dm_crtc_helper_atomic_check,
7616         .mode_fixup = dm_crtc_helper_mode_fixup,
7617         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7618 };
7619
7620 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7621 {
7622
7623 }
7624
7625 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7626 {
7627         switch (display_color_depth) {
7628         case COLOR_DEPTH_666:
7629                 return 6;
7630         case COLOR_DEPTH_888:
7631                 return 8;
7632         case COLOR_DEPTH_101010:
7633                 return 10;
7634         case COLOR_DEPTH_121212:
7635                 return 12;
7636         case COLOR_DEPTH_141414:
7637                 return 14;
7638         case COLOR_DEPTH_161616:
7639                 return 16;
7640         default:
7641                 break;
7642         }
7643         return 0;
7644 }
7645
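/*
 * For an MST connector, translate the mode's bandwidth demand into a PBN
 * value and reserve the matching number of VCPI time slots. PBN is
 * expressed in units of 54/64 MBps; as a rough worked example,
 * 1920x1080@60 (148.5 MHz pixel clock) at 8 bpc gives bpp = 24 and a raw
 * rate of 445.5 MBps, which with the ~0.6% margin applied by
 * drm_dp_calc_pbn_mode() comes out to roughly 532 PBN.
 */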
7646 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7647                                           struct drm_crtc_state *crtc_state,
7648                                           struct drm_connector_state *conn_state)
7649 {
7650         struct drm_atomic_state *state = crtc_state->state;
7651         struct drm_connector *connector = conn_state->connector;
7652         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7653         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7654         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7655         struct drm_dp_mst_topology_mgr *mst_mgr;
7656         struct drm_dp_mst_port *mst_port;
7657         enum dc_color_depth color_depth;
7658         int clock, bpp = 0;
7659         bool is_y420 = false;
7660
7661         if (!aconnector->port || !aconnector->dc_sink)
7662                 return 0;
7663
7664         mst_port = aconnector->port;
7665         mst_mgr = &aconnector->mst_port->mst_mgr;
7666
7667         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7668                 return 0;
7669
7670         if (!state->duplicated) {
7671                 int max_bpc = conn_state->max_requested_bpc;
7672                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7673                                 aconnector->force_yuv420_output;
7674                 color_depth = convert_color_depth_from_display_info(connector,
7675                                                                     is_y420,
7676                                                                     max_bpc);
7677                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7678                 clock = adjusted_mode->clock;
7679                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7680         }
7681         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7682                                                                            mst_mgr,
7683                                                                            mst_port,
7684                                                                            dm_new_connector_state->pbn,
7685                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7686         if (dm_new_connector_state->vcpi_slots < 0) {
7687                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7688                 return dm_new_connector_state->vcpi_slots;
7689         }
7690         return 0;
7691 }
7692
7693 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7694         .disable = dm_encoder_helper_disable,
7695         .atomic_check = dm_encoder_helper_atomic_check
7696 };
7697
7698 #if defined(CONFIG_DRM_AMD_DC_DCN)
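/*
 * Apply the PBN values computed by compute_mst_dsc_configs_for_state():
 * match each MST connector to its stream via dm_stream_context, derive
 * the slot count as DIV_ROUND_UP(pbn, pbn_div), and enable or disable
 * DSC on the MST port accordingly.
 */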
7699 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7700                                             struct dc_state *dc_state,
7701                                             struct dsc_mst_fairness_vars *vars)
7702 {
7703         struct dc_stream_state *stream = NULL;
7704         struct drm_connector *connector;
7705         struct drm_connector_state *new_con_state;
7706         struct amdgpu_dm_connector *aconnector;
7707         struct dm_connector_state *dm_conn_state;
7708         int i, j;
7709         int vcpi, pbn_div, pbn, slot_num = 0;
7710
7711         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7712
7713                 aconnector = to_amdgpu_dm_connector(connector);
7714
7715                 if (!aconnector->port)
7716                         continue;
7717
7718                 if (!new_con_state || !new_con_state->crtc)
7719                         continue;
7720
7721                 dm_conn_state = to_dm_connector_state(new_con_state);
7722
7723                 for (j = 0; j < dc_state->stream_count; j++) {
7724                         stream = dc_state->streams[j];
7725                         if (!stream)
7726                                 continue;
7727
7728                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7729                                 break;
7730
7731                         stream = NULL;
7732                 }
7733
7734                 if (!stream)
7735                         continue;
7736
7737                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7738                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7739                 for (j = 0; j < dc_state->stream_count; j++) {
7740                         if (vars[j].aconnector == aconnector) {
7741                                 pbn = vars[j].pbn;
7742                                 break;
7743                         }
7744                 }
7745
7746                 if (j == dc_state->stream_count)
7747                         continue;
7748
7749                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7750
7751                 if (stream->timing.flags.DSC != 1) {
7752                         dm_conn_state->pbn = pbn;
7753                         dm_conn_state->vcpi_slots = slot_num;
7754
7755                         drm_dp_mst_atomic_enable_dsc(state,
7756                                                      aconnector->port,
7757                                                      dm_conn_state->pbn,
7758                                                      0,
7759                                                      false);
7760                         continue;
7761                 }
7762
7763                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7764                                                     aconnector->port,
7765                                                     pbn, pbn_div,
7766                                                     true);
7767                 if (vcpi < 0)
7768                         return vcpi;
7769
7770                 dm_conn_state->pbn = pbn;
7771                 dm_conn_state->vcpi_slots = vcpi;
7772         }
7773         return 0;
7774 }
7775 #endif
7776
7777 static void dm_drm_plane_reset(struct drm_plane *plane)
7778 {
7779         struct dm_plane_state *amdgpu_state = NULL;
7780
7781         if (plane->state)
7782                 plane->funcs->atomic_destroy_state(plane, plane->state);
7783
7784         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7785         WARN_ON(amdgpu_state == NULL);
7786
7787         if (amdgpu_state)
7788                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7789 }
7790
7791 static struct drm_plane_state *
7792 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7793 {
7794         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7795
7796         old_dm_plane_state = to_dm_plane_state(plane->state);
7797         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7798         if (!dm_plane_state)
7799                 return NULL;
7800
7801         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7802
7803         if (old_dm_plane_state->dc_state) {
7804                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7805                 dc_plane_state_retain(dm_plane_state->dc_state);
7806         }
7807
7808         return &dm_plane_state->base;
7809 }
7810
7811 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7812                                 struct drm_plane_state *state)
7813 {
7814         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7815
7816         if (dm_plane_state->dc_state)
7817                 dc_plane_state_release(dm_plane_state->dc_state);
7818
7819         drm_atomic_helper_plane_destroy_state(plane, state);
7820 }
7821
7822 static const struct drm_plane_funcs dm_plane_funcs = {
7823         .update_plane   = drm_atomic_helper_update_plane,
7824         .disable_plane  = drm_atomic_helper_disable_plane,
7825         .destroy        = drm_primary_helper_destroy,
7826         .reset = dm_drm_plane_reset,
7827         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7828         .atomic_destroy_state = dm_drm_plane_destroy_state,
7829         .format_mod_supported = dm_plane_format_mod_supported,
7830 };
7831
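/*
 * Pin the new framebuffer's BO for scanout: reserve the BO and a fence
 * slot, pin it into VRAM (cursor planes) or any display-capable domain,
 * bind it into the GART, and record the resulting GPU address in the
 * amdgpu framebuffer for later programming.
 */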
7832 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7833                                       struct drm_plane_state *new_state)
7834 {
7835         struct amdgpu_framebuffer *afb;
7836         struct drm_gem_object *obj;
7837         struct amdgpu_device *adev;
7838         struct amdgpu_bo *rbo;
7839         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7840         uint32_t domain;
7841         int r;
7842
7843         if (!new_state->fb) {
7844                 DRM_DEBUG_KMS("No FB bound\n");
7845                 return 0;
7846         }
7847
7848         afb = to_amdgpu_framebuffer(new_state->fb);
7849         obj = new_state->fb->obj[0];
7850         rbo = gem_to_amdgpu_bo(obj);
7851         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7852
7853         r = amdgpu_bo_reserve(rbo, true);
7854         if (r) {
7855                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7856                 return r;
7857         }
7858
7859         r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7860         if (r) {
7861                 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7862                 goto error_unlock;
7863         }
7864
7865         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7866                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7867         else
7868                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7869
7870         r = amdgpu_bo_pin(rbo, domain);
7871         if (unlikely(r != 0)) {
7872                 if (r != -ERESTARTSYS)
7873                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7874                 goto error_unlock;
7875         }
7876
7877         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7878         if (unlikely(r != 0)) {
7879                 DRM_ERROR("%p bind failed\n", rbo);
7880                 goto error_unpin;
7881         }
7882
7883         amdgpu_bo_unreserve(rbo);
7884
7885         afb->address = amdgpu_bo_gpu_offset(rbo);
7886
7887         amdgpu_bo_ref(rbo);
7888
7889         /*
7890          * We don't do surface updates on planes that have been newly created,
7891          * but we also don't have the afb->address during atomic check.
7892          *
7893          * Fill in buffer attributes depending on the address here, but only on
7894          * newly created planes since they're not being used by DC yet and this
7895          * won't modify global state.
7896          */
7897         dm_plane_state_old = to_dm_plane_state(plane->state);
7898         dm_plane_state_new = to_dm_plane_state(new_state);
7899
7900         if (dm_plane_state_new->dc_state &&
7901             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7902                 struct dc_plane_state *plane_state =
7903                         dm_plane_state_new->dc_state;
7904                 bool force_disable_dcc = !plane_state->dcc.enable;
7905
7906                 fill_plane_buffer_attributes(
7907                         adev, afb, plane_state->format, plane_state->rotation,
7908                         afb->tiling_flags,
7909                         &plane_state->tiling_info, &plane_state->plane_size,
7910                         &plane_state->dcc, &plane_state->address,
7911                         afb->tmz_surface, force_disable_dcc);
7912         }
7913
7914         return 0;
7915
7916 error_unpin:
7917         amdgpu_bo_unpin(rbo);
7918
7919 error_unlock:
7920         amdgpu_bo_unreserve(rbo);
7921         return r;
7922 }
7923
7924 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7925                                        struct drm_plane_state *old_state)
7926 {
7927         struct amdgpu_bo *rbo;
7928         int r;
7929
7930         if (!old_state->fb)
7931                 return;
7932
7933         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7934         r = amdgpu_bo_reserve(rbo, false);
7935         if (unlikely(r)) {
7936                 DRM_ERROR("failed to reserve rbo before unpin\n");
7937                 return;
7938         }
7939
7940         amdgpu_bo_unpin(rbo);
7941         amdgpu_bo_unreserve(rbo);
7942         amdgpu_bo_unref(&rbo);
7943 }
7944
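/*
 * Validate the plane's viewport against the new CRTC state and derive the
 * allowed scaling range from DC plane caps, before handing off to
 * drm_atomic_helper_check_plane_state() for the final clipping checks.
 */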
7945 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7946                                        struct drm_crtc_state *new_crtc_state)
7947 {
7948         struct drm_framebuffer *fb = state->fb;
7949         int min_downscale, max_upscale;
7950         int min_scale = 0;
7951         int max_scale = INT_MAX;
7952
7953         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7954         if (fb && state->crtc) {
7955                 /* Validate viewport to cover the case when only the position changes */
7956                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7957                         int viewport_width = state->crtc_w;
7958                         int viewport_height = state->crtc_h;
7959
7960                         if (state->crtc_x < 0)
7961                                 viewport_width += state->crtc_x;
7962                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7963                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7964
7965                         if (state->crtc_y < 0)
7966                                 viewport_height += state->crtc_y;
7967                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7968                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7969
7970                         if (viewport_width < 0 || viewport_height < 0) {
7971                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7972                                 return -EINVAL;
7973                         } else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* 2x width because of pipe split */
7974                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
7975                                 return -EINVAL;
7976                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7977                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7978                                 return -EINVAL;
7979                         }
7981                 }
7982
7983                 /* Get min/max allowed scaling factors from plane caps. */
7984                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7985                                              &min_downscale, &max_upscale);
7986                 /*
7987                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7988                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7989                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7990                  */
7991                 min_scale = (1000 << 16) / max_upscale;
7992                 max_scale = (1000 << 16) / min_downscale;
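                /*
                 * Worked example (illustrative numbers only): a plane cap of
                 * max_upscale = 16000 (16x in dc's 1.0 == 1000 convention)
                 * gives min_scale = (1000 << 16) / 16000 = 0x1000, i.e. 1/16
                 * in drm 16.16 fixed point; min_downscale = 250 (1/4x) gives
                 * max_scale = (1000 << 16) / 250 = 0x40000, i.e. 4.0.
                 */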
7993         }
7994
7995         return drm_atomic_helper_check_plane_state(
7996                 state, new_crtc_state, min_scale, max_scale, true, true);
7997 }
7998
7999 static int dm_plane_atomic_check(struct drm_plane *plane,
8000                                  struct drm_atomic_state *state)
8001 {
8002         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
8003                                                                                  plane);
8004         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8005         struct dc *dc = adev->dm.dc;
8006         struct dm_plane_state *dm_plane_state;
8007         struct dc_scaling_info scaling_info;
8008         struct drm_crtc_state *new_crtc_state;
8009         int ret;
8010
8011         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
8012
8013         dm_plane_state = to_dm_plane_state(new_plane_state);
8014
8015         if (!dm_plane_state->dc_state)
8016                 return 0;
8017
8018         new_crtc_state =
8019                 drm_atomic_get_new_crtc_state(state,
8020                                               new_plane_state->crtc);
8021         if (!new_crtc_state)
8022                 return -EINVAL;
8023
8024         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8025         if (ret)
8026                 return ret;
8027
8028         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
8029         if (ret)
8030                 return ret;
8031
8032         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
8033                 return 0;
8034
8035         return -EINVAL;
8036 }
8037
8038 static int dm_plane_atomic_async_check(struct drm_plane *plane,
8039                                        struct drm_atomic_state *state)
8040 {
8041         /* Only support async updates on cursor planes. */
8042         if (plane->type != DRM_PLANE_TYPE_CURSOR)
8043                 return -EINVAL;
8044
8045         return 0;
8046 }
8047
8048 static void dm_plane_atomic_async_update(struct drm_plane *plane,
8049                                          struct drm_atomic_state *state)
8050 {
8051         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
8052                                                                            plane);
8053         struct drm_plane_state *old_state =
8054                 drm_atomic_get_old_plane_state(state, plane);
8055
8056         trace_amdgpu_dm_atomic_update_cursor(new_state);
8057
8058         swap(plane->state->fb, new_state->fb);
8059
8060         plane->state->src_x = new_state->src_x;
8061         plane->state->src_y = new_state->src_y;
8062         plane->state->src_w = new_state->src_w;
8063         plane->state->src_h = new_state->src_h;
8064         plane->state->crtc_x = new_state->crtc_x;
8065         plane->state->crtc_y = new_state->crtc_y;
8066         plane->state->crtc_w = new_state->crtc_w;
8067         plane->state->crtc_h = new_state->crtc_h;
8068
8069         handle_cursor_update(plane, old_state);
8070 }
8071
8072 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
8073         .prepare_fb = dm_plane_helper_prepare_fb,
8074         .cleanup_fb = dm_plane_helper_cleanup_fb,
8075         .atomic_check = dm_plane_atomic_check,
8076         .atomic_async_check = dm_plane_atomic_async_check,
8077         .atomic_async_update = dm_plane_atomic_async_update
8078 };
8079
8080 /*
8081  * TODO: these are currently initialized to rgb formats only.
8082  * For future use cases we should either initialize them dynamically based on
8083  * plane capabilities, or initialize this array to all formats, so internal drm
8084  * check will succeed, and let DC implement proper check
8085  */
8086 static const uint32_t rgb_formats[] = {
8087         DRM_FORMAT_XRGB8888,
8088         DRM_FORMAT_ARGB8888,
8089         DRM_FORMAT_RGBA8888,
8090         DRM_FORMAT_XRGB2101010,
8091         DRM_FORMAT_XBGR2101010,
8092         DRM_FORMAT_ARGB2101010,
8093         DRM_FORMAT_ABGR2101010,
8094         DRM_FORMAT_XRGB16161616,
8095         DRM_FORMAT_XBGR16161616,
8096         DRM_FORMAT_ARGB16161616,
8097         DRM_FORMAT_ABGR16161616,
8098         DRM_FORMAT_XBGR8888,
8099         DRM_FORMAT_ABGR8888,
8100         DRM_FORMAT_RGB565,
8101 };
8102
8103 static const uint32_t overlay_formats[] = {
8104         DRM_FORMAT_XRGB8888,
8105         DRM_FORMAT_ARGB8888,
8106         DRM_FORMAT_RGBA8888,
8107         DRM_FORMAT_XBGR8888,
8108         DRM_FORMAT_ABGR8888,
8109         DRM_FORMAT_RGB565
8110 };
8111
8112 static const u32 cursor_formats[] = {
8113         DRM_FORMAT_ARGB8888
8114 };
8115
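/*
 * Fill @formats with up to @max_formats DRM fourcc codes supported by
 * @plane, based on the plane type and the DC plane capabilities. Returns
 * the number of formats written.
 */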
8116 static int get_plane_formats(const struct drm_plane *plane,
8117                              const struct dc_plane_cap *plane_cap,
8118                              uint32_t *formats, int max_formats)
8119 {
8120         int i, num_formats = 0;
8121
8122         /*
8123          * TODO: Query support for each group of formats directly from
8124          * DC plane caps. This will require adding more formats to the
8125          * caps list.
8126          */
8127
8128         switch (plane->type) {
8129         case DRM_PLANE_TYPE_PRIMARY:
8130                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
8131                         if (num_formats >= max_formats)
8132                                 break;
8133
8134                         formats[num_formats++] = rgb_formats[i];
8135                 }
8136
8137                 if (plane_cap && plane_cap->pixel_format_support.nv12)
8138                         formats[num_formats++] = DRM_FORMAT_NV12;
8139                 if (plane_cap && plane_cap->pixel_format_support.p010)
8140                         formats[num_formats++] = DRM_FORMAT_P010;
8141                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
8142                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
8143                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
8144                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
8145                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
8146                 }
8147                 break;
8148
8149         case DRM_PLANE_TYPE_OVERLAY:
8150                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
8151                         if (num_formats >= max_formats)
8152                                 break;
8153
8154                         formats[num_formats++] = overlay_formats[i];
8155                 }
8156                 break;
8157
8158         case DRM_PLANE_TYPE_CURSOR:
8159                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
8160                         if (num_formats >= max_formats)
8161                                 break;
8162
8163                         formats[num_formats++] = cursor_formats[i];
8164                 }
8165                 break;
8166         }
8167
8168         return num_formats;
8169 }
8170
8171 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
8172                                 struct drm_plane *plane,
8173                                 unsigned long possible_crtcs,
8174                                 const struct dc_plane_cap *plane_cap)
8175 {
8176         uint32_t formats[32];
8177         int num_formats;
8178         int res = -EPERM;
8179         unsigned int supported_rotations;
8180         uint64_t *modifiers = NULL;
8181
8182         num_formats = get_plane_formats(plane, plane_cap, formats,
8183                                         ARRAY_SIZE(formats));
8184
8185         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
8186         if (res)
8187                 return res;
8188
8189         if (modifiers == NULL)
8190                 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8191
8192         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
8193                                        &dm_plane_funcs, formats, num_formats,
8194                                        modifiers, plane->type, NULL);
8195         kfree(modifiers);
8196         if (res)
8197                 return res;
8198
8199         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8200             plane_cap && plane_cap->per_pixel_alpha) {
8201                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
8202                                           BIT(DRM_MODE_BLEND_PREMULTI) |
8203                                           BIT(DRM_MODE_BLEND_COVERAGE);
8204
8205                 drm_plane_create_alpha_property(plane);
8206                 drm_plane_create_blend_mode_property(plane, blend_caps);
8207         }
8208
8209         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
8210             plane_cap &&
8211             (plane_cap->pixel_format_support.nv12 ||
8212              plane_cap->pixel_format_support.p010)) {
8213                 /* This only affects YUV formats. */
8214                 drm_plane_create_color_properties(
8215                         plane,
8216                         BIT(DRM_COLOR_YCBCR_BT601) |
8217                         BIT(DRM_COLOR_YCBCR_BT709) |
8218                         BIT(DRM_COLOR_YCBCR_BT2020),
8219                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8220                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8221                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8222         }
8223
8224         supported_rotations =
8225                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8226                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8227
8228         if (dm->adev->asic_type >= CHIP_BONAIRE &&
8229             plane->type != DRM_PLANE_TYPE_CURSOR)
8230                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8231                                                    supported_rotations);
8232
8233         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8234
8235         /* Create (reset) the plane state */
8236         if (plane->funcs->reset)
8237                 plane->funcs->reset(plane);
8238
8239         return 0;
8240 }
8241
8242 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8243                                struct drm_plane *plane,
8244                                uint32_t crtc_index)
8245 {
8246         struct amdgpu_crtc *acrtc = NULL;
8247         struct drm_plane *cursor_plane;
8248
8249         int res = -ENOMEM;
8250
8251         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8252         if (!cursor_plane)
8253                 goto fail;
8254
8255         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8256         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;

8258         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8259         if (!acrtc)
8260                 goto fail;
8261
8262         res = drm_crtc_init_with_planes(
8263                         dm->ddev,
8264                         &acrtc->base,
8265                         plane,
8266                         cursor_plane,
8267                         &amdgpu_dm_crtc_funcs, NULL);
8268
8269         if (res)
8270                 goto fail;
8271
8272         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8273
8274         /* Create (reset) the plane state */
8275         if (acrtc->base.funcs->reset)
8276                 acrtc->base.funcs->reset(&acrtc->base);
8277
8278         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8279         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8280
8281         acrtc->crtc_id = crtc_index;
8282         acrtc->base.enabled = false;
8283         acrtc->otg_inst = -1;
8284
8285         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8286         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8287                                    true, MAX_COLOR_LUT_ENTRIES);
8288         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8289
8290         return 0;
8291
8292 fail:
8293         kfree(acrtc);
8294         kfree(cursor_plane);
8295         return res;
8296 }
8297
8299 static int to_drm_connector_type(enum signal_type st)
8300 {
8301         switch (st) {
8302         case SIGNAL_TYPE_HDMI_TYPE_A:
8303                 return DRM_MODE_CONNECTOR_HDMIA;
8304         case SIGNAL_TYPE_EDP:
8305                 return DRM_MODE_CONNECTOR_eDP;
8306         case SIGNAL_TYPE_LVDS:
8307                 return DRM_MODE_CONNECTOR_LVDS;
8308         case SIGNAL_TYPE_RGB:
8309                 return DRM_MODE_CONNECTOR_VGA;
8310         case SIGNAL_TYPE_DISPLAY_PORT:
8311         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8312                 return DRM_MODE_CONNECTOR_DisplayPort;
8313         case SIGNAL_TYPE_DVI_DUAL_LINK:
8314         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8315                 return DRM_MODE_CONNECTOR_DVID;
8316         case SIGNAL_TYPE_VIRTUAL:
8317                 return DRM_MODE_CONNECTOR_VIRTUAL;
8318
8319         default:
8320                 return DRM_MODE_CONNECTOR_Unknown;
8321         }
8322 }
8323
8324 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8325 {
8326         struct drm_encoder *encoder;
8327
8328         /* There is only one encoder per connector */
8329         drm_connector_for_each_possible_encoder(connector, encoder)
8330                 return encoder;
8331
8332         return NULL;
8333 }
8334
8335 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8336 {
8337         struct drm_encoder *encoder;
8338         struct amdgpu_encoder *amdgpu_encoder;
8339
8340         encoder = amdgpu_dm_connector_to_encoder(connector);
8341
8342         if (encoder == NULL)
8343                 return;
8344
8345         amdgpu_encoder = to_amdgpu_encoder(encoder);
8346
8347         amdgpu_encoder->native_mode.clock = 0;
8348
8349         if (!list_empty(&connector->probed_modes)) {
8350                 struct drm_display_mode *preferred_mode;
8351
8352                 /*
8353                  * The probed modes are sorted with the preferred mode
8354                  * first, so only the head of the list needs checking.
8355                  */
8356                 preferred_mode = list_first_entry(&connector->probed_modes,
8357                                                   struct drm_display_mode,
8358                                                   head);
8359                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8360                         amdgpu_encoder->native_mode = *preferred_mode;
8361         }
8362 }
8363
8364 static struct drm_display_mode *
8365 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8366                              char *name,
8367                              int hdisplay, int vdisplay)
8368 {
8369         struct drm_device *dev = encoder->dev;
8370         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8371         struct drm_display_mode *mode = NULL;
8372         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8373
8374         mode = drm_mode_duplicate(dev, native_mode);
8375
8376         if (mode == NULL)
8377                 return NULL;
8378
8379         mode->hdisplay = hdisplay;
8380         mode->vdisplay = vdisplay;
8381         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8382         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8383
8384         return mode;
8386 }
8387
8388 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8389                                                  struct drm_connector *connector)
8390 {
8391         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8392         struct drm_display_mode *mode = NULL;
8393         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8394         struct amdgpu_dm_connector *amdgpu_dm_connector =
8395                                 to_amdgpu_dm_connector(connector);
8396         int i;
8397         int n;
8398         struct mode_size {
8399                 char name[DRM_DISPLAY_MODE_LEN];
8400                 int w;
8401                 int h;
8402         } common_modes[] = {
8403                 {  "640x480",  640,  480},
8404                 {  "800x600",  800,  600},
8405                 { "1024x768", 1024,  768},
8406                 { "1280x720", 1280,  720},
8407                 { "1280x800", 1280,  800},
8408                 {"1280x1024", 1280, 1024},
8409                 { "1440x900", 1440,  900},
8410                 {"1680x1050", 1680, 1050},
8411                 {"1600x1200", 1600, 1200},
8412                 {"1920x1080", 1920, 1080},
8413                 {"1920x1200", 1920, 1200}
8414         };
8415
8416         n = ARRAY_SIZE(common_modes);
8417
8418         for (i = 0; i < n; i++) {
8419                 struct drm_display_mode *curmode = NULL;
8420                 bool mode_existed = false;
8421
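                /*
                 * Skip common modes larger than the native mode, and skip
                 * the native resolution itself since it is already in the
                 * probed list.
                 */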
8422                 if (common_modes[i].w > native_mode->hdisplay ||
8423                     common_modes[i].h > native_mode->vdisplay ||
8424                    (common_modes[i].w == native_mode->hdisplay &&
8425                     common_modes[i].h == native_mode->vdisplay))
8426                         continue;
8427
8428                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8429                         if (common_modes[i].w == curmode->hdisplay &&
8430                             common_modes[i].h == curmode->vdisplay) {
8431                                 mode_existed = true;
8432                                 break;
8433                         }
8434                 }
8435
8436                 if (mode_existed)
8437                         continue;
8438
8439                 mode = amdgpu_dm_create_common_mode(encoder,
8440                                 common_modes[i].name, common_modes[i].w,
8441                                 common_modes[i].h);
8442                 if (!mode)
8443                         continue;
8444
8445                 drm_mode_probed_add(connector, mode);
8446                 amdgpu_dm_connector->num_modes++;
8447         }
8448 }
8449
8450 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8451 {
8452         struct drm_encoder *encoder;
8453         struct amdgpu_encoder *amdgpu_encoder;
8454         const struct drm_display_mode *native_mode;
8455
8456         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8457             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8458                 return;
8459
8460         encoder = amdgpu_dm_connector_to_encoder(connector);
8461         if (!encoder)
8462                 return;
8463
8464         amdgpu_encoder = to_amdgpu_encoder(encoder);
8465
8466         native_mode = &amdgpu_encoder->native_mode;
8467         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8468                 return;
8469
8470         drm_connector_set_panel_orientation_with_quirk(connector,
8471                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8472                                                        native_mode->hdisplay,
8473                                                        native_mode->vdisplay);
8474 }
8475
8476 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8477                                               struct edid *edid)
8478 {
8479         struct amdgpu_dm_connector *amdgpu_dm_connector =
8480                         to_amdgpu_dm_connector(connector);
8481
8482         if (edid) {
8483                 /* empty probed_modes */
8484                 INIT_LIST_HEAD(&connector->probed_modes);
8485                 amdgpu_dm_connector->num_modes =
8486                                 drm_add_edid_modes(connector, edid);
8487
8488                 /*
8489                  * Sort the probed modes before calling
8490                  * amdgpu_dm_get_native_mode(), since the EDID can have
8491                  * more than one preferred mode, and modes later in the
8492                  * probed list can be of higher, preferred resolution:
8493                  * e.g. 3840x2160 in the base EDID preferred timing and
8494                  * 4096x2160 in a later DID extension block.
8495                  */
8496                 drm_mode_sort(&connector->probed_modes);
8497                 amdgpu_dm_get_native_mode(connector);
8498
8499                 /*
8500                  * Freesync capabilities are reset by calling
8501                  * drm_add_edid_modes() and need to be restored here.
8502                  */
8503                 amdgpu_dm_update_freesync_caps(connector, edid);
8504
8505                 amdgpu_set_panel_orientation(connector);
8506         } else {
8507                 amdgpu_dm_connector->num_modes = 0;
8508         }
8509 }
8510
8511 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8512                               struct drm_display_mode *mode)
8513 {
8514         struct drm_display_mode *m;
8515
8516         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8517                 if (drm_mode_equal(m, mode))
8518                         return true;
8519         }
8520
8521         return false;
8522 }
8523
8524 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8525 {
8526         const struct drm_display_mode *m;
8527         struct drm_display_mode *new_mode;
8528         uint i;
8529         uint32_t new_modes_count = 0;
8530
8531         /* Standard FPS values
8532          *
8533          * 23.976       - TV/NTSC
8534          * 24           - Cinema
8535          * 25           - TV/PAL
8536          * 29.97        - TV/NTSC
8537          * 30           - TV/NTSC
8538          * 48           - Cinema HFR
8539          * 50           - TV/PAL
8540          * 60           - Commonly used
8541          * 48,72,96,120 - Multiples of 24
8542          */
8543         static const uint32_t common_rates[] = {
8544                 23976, 24000, 25000, 29970, 30000,
8545                 48000, 50000, 60000, 72000, 96000, 120000
8546         };
8547
8548         /*
8549          * Find mode with highest refresh rate with the same resolution
8550          * as the preferred mode. Some monitors report a preferred mode
8551          * with lower resolution than the highest refresh rate supported.
8552          */
8553
8554         m = get_highest_refresh_rate_mode(aconnector, true);
8555         if (!m)
8556                 return 0;
8557
8558         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8559                 uint64_t target_vtotal, target_vtotal_diff;
8560                 uint64_t num, den;
8561
8562                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8563                         continue;
8564
8565                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8566                     common_rates[i] > aconnector->max_vfreq * 1000)
8567                         continue;
8568
8569                 num = (unsigned long long)m->clock * 1000 * 1000;
8570                 den = common_rates[i] * (unsigned long long)m->htotal;
8571                 target_vtotal = div_u64(num, den);
8572                 target_vtotal_diff = target_vtotal - m->vtotal;
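                /*
                 * Example with assumed numbers: retargeting a 1080p60 CEA
                 * mode (clock = 148500 kHz, htotal = 2200, vtotal = 1125)
                 * to 50 Hz gives target_vtotal = 148500 * 10^6 /
                 * (50000 * 2200) = 1350, i.e. 225 extra lines of vertical
                 * blanking at an unchanged pixel clock.
                 */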
8573
8574                 /* Check for illegal modes */
8575                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8576                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8577                     m->vtotal + target_vtotal_diff < m->vsync_end)
8578                         continue;
8579
8580                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8581                 if (!new_mode)
8582                         goto out;
8583
8584                 new_mode->vtotal += (u16)target_vtotal_diff;
8585                 new_mode->vsync_start += (u16)target_vtotal_diff;
8586                 new_mode->vsync_end += (u16)target_vtotal_diff;
8587                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8588                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8589
8590                 if (!is_duplicate_mode(aconnector, new_mode)) {
8591                         drm_mode_probed_add(&aconnector->base, new_mode);
8592                         new_modes_count += 1;
8593                 } else {
8594                         drm_mode_destroy(aconnector->base.dev, new_mode);
                 }
8595         }
8596  out:
8597         return new_modes_count;
8598 }
8599
8600 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8601                                                    struct edid *edid)
8602 {
8603         struct amdgpu_dm_connector *amdgpu_dm_connector =
8604                 to_amdgpu_dm_connector(connector);
8605
8606         if (!edid)
8607                 return;
8608
8609         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8610                 amdgpu_dm_connector->num_modes +=
8611                         add_fs_modes(amdgpu_dm_connector);
8612 }
8613
8614 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8615 {
8616         struct amdgpu_dm_connector *amdgpu_dm_connector =
8617                         to_amdgpu_dm_connector(connector);
8618         struct drm_encoder *encoder;
8619         struct edid *edid = amdgpu_dm_connector->edid;
8620
8621         encoder = amdgpu_dm_connector_to_encoder(connector);
8622
8623         if (!drm_edid_is_valid(edid)) {
8624                 amdgpu_dm_connector->num_modes =
8625                                 drm_add_modes_noedid(connector, 640, 480);
8626         } else {
8627                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8628                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8629                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8630         }
8631         amdgpu_dm_fbc_init(connector);
8632
8633         return amdgpu_dm_connector->num_modes;
8634 }
8635
8636 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8637                                      struct amdgpu_dm_connector *aconnector,
8638                                      int connector_type,
8639                                      struct dc_link *link,
8640                                      int link_index)
8641 {
8642         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8643
8644         /*
8645          * Some of the properties below require access to state, like bpc.
8646          * Allocate some default initial connector state with our reset helper.
8647          */
8648         if (aconnector->base.funcs->reset)
8649                 aconnector->base.funcs->reset(&aconnector->base);
8650
8651         aconnector->connector_id = link_index;
8652         aconnector->dc_link = link;
8653         aconnector->base.interlace_allowed = false;
8654         aconnector->base.doublescan_allowed = false;
8655         aconnector->base.stereo_allowed = false;
8656         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8657         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8658         aconnector->audio_inst = -1;
8659         mutex_init(&aconnector->hpd_lock);
8660
8661         /*
8662          * Configure HPD hot plug support: connector->polled defaults to 0,
8663          * which means hot plug is not supported.
8664          */
8665         switch (connector_type) {
8666         case DRM_MODE_CONNECTOR_HDMIA:
8667                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8668                 aconnector->base.ycbcr_420_allowed =
8669                         link->link_enc->features.hdmi_ycbcr420_supported;
8670                 break;
8671         case DRM_MODE_CONNECTOR_DisplayPort:
8672                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8673                 link->link_enc = link_enc_cfg_get_link_enc(link);
8674                 ASSERT(link->link_enc);
8675                 if (link->link_enc)
8676                         aconnector->base.ycbcr_420_allowed =
8677                                 link->link_enc->features.dp_ycbcr420_supported;
8678                 break;
8679         case DRM_MODE_CONNECTOR_DVID:
8680                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8681                 break;
8682         default:
8683                 break;
8684         }
8685
8686         drm_object_attach_property(&aconnector->base.base,
8687                                 dm->ddev->mode_config.scaling_mode_property,
8688                                 DRM_MODE_SCALE_NONE);
8689
8690         drm_object_attach_property(&aconnector->base.base,
8691                                 adev->mode_info.underscan_property,
8692                                 UNDERSCAN_OFF);
8693         drm_object_attach_property(&aconnector->base.base,
8694                                 adev->mode_info.underscan_hborder_property,
8695                                 0);
8696         drm_object_attach_property(&aconnector->base.base,
8697                                 adev->mode_info.underscan_vborder_property,
8698                                 0);
8699
8700         if (!aconnector->mst_port)
8701                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8702
8703         /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
8704         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8705         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8706
8707         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8708             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8709                 drm_object_attach_property(&aconnector->base.base,
8710                                 adev->mode_info.abm_level_property, 0);
8711         }
8712
8713         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8714             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8715             connector_type == DRM_MODE_CONNECTOR_eDP) {
8716                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8717
8718                 if (!aconnector->mst_port)
8719                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8720
8721 #ifdef CONFIG_DRM_AMD_DC_HDCP
8722                 if (adev->dm.hdcp_workqueue)
8723                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8724 #endif
8725         }
8726 }
8727
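/*
 * Translate the Linux i2c_msg array into a DC i2c_command and submit it
 * over the link's DDC channel. Returns the number of messages on success,
 * -EIO otherwise.
 */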
8728 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8729                               struct i2c_msg *msgs, int num)
8730 {
8731         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8732         struct ddc_service *ddc_service = i2c->ddc_service;
8733         struct i2c_command cmd;
8734         int i;
8735         int result = -EIO;
8736
8737         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8738
8739         if (!cmd.payloads)
8740                 return result;
8741
8742         cmd.number_of_payloads = num;
8743         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8744         cmd.speed = 100;
8745
8746         for (i = 0; i < num; i++) {
8747                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8748                 cmd.payloads[i].address = msgs[i].addr;
8749                 cmd.payloads[i].length = msgs[i].len;
8750                 cmd.payloads[i].data = msgs[i].buf;
8751         }
8752
8753         if (dc_submit_i2c(
8754                         ddc_service->ctx->dc,
8755                         ddc_service->link->link_index,
8756                         &cmd))
8757                 result = num;
8758
8759         kfree(cmd.payloads);
8760         return result;
8761 }
8762
8763 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8764 {
8765         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8766 }
8767
8768 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8769         .master_xfer = amdgpu_dm_i2c_xfer,
8770         .functionality = amdgpu_dm_i2c_func,
8771 };
8772
8773 static struct amdgpu_i2c_adapter *
8774 create_i2c(struct ddc_service *ddc_service,
8775            int link_index,
8776            int *res)
8777 {
8778         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8779         struct amdgpu_i2c_adapter *i2c;
8780
8781         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8782         if (!i2c)
8783                 return NULL;
8784         i2c->base.owner = THIS_MODULE;
8785         i2c->base.class = I2C_CLASS_DDC;
8786         i2c->base.dev.parent = &adev->pdev->dev;
8787         i2c->base.algo = &amdgpu_dm_i2c_algo;
8788         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8789         i2c_set_adapdata(&i2c->base, i2c);
8790         i2c->ddc_service = ddc_service;
8791
8792         return i2c;
8793 }
8794
8796 /*
8797  * Note: this function assumes that dc_link_detect() was called for the
8798  * dc_link which will be represented by this aconnector.
8799  */
8800 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8801                                     struct amdgpu_dm_connector *aconnector,
8802                                     uint32_t link_index,
8803                                     struct amdgpu_encoder *aencoder)
8804 {
8805         int res = 0;
8806         int connector_type;
8807         struct dc *dc = dm->dc;
8808         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8809         struct amdgpu_i2c_adapter *i2c;
8810
8811         link->priv = aconnector;
8812
8813         DRM_DEBUG_DRIVER("%s()\n", __func__);
8814
8815         i2c = create_i2c(link->ddc, link->link_index, &res);
8816         if (!i2c) {
8817                 DRM_ERROR("Failed to create i2c adapter data\n");
8818                 return -ENOMEM;
8819         }
8820
8821         aconnector->i2c = i2c;
8822         res = i2c_add_adapter(&i2c->base);
8823
8824         if (res) {
8825                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8826                 goto out_free;
8827         }
8828
8829         connector_type = to_drm_connector_type(link->connector_signal);
8830
8831         res = drm_connector_init_with_ddc(
8832                         dm->ddev,
8833                         &aconnector->base,
8834                         &amdgpu_dm_connector_funcs,
8835                         connector_type,
8836                         &i2c->base);
8837
8838         if (res) {
8839                 DRM_ERROR("connector_init failed\n");
8840                 aconnector->connector_id = -1;
8841                 goto out_free;
8842         }
8843
8844         drm_connector_helper_add(
8845                         &aconnector->base,
8846                         &amdgpu_dm_connector_helper_funcs);
8847
8848         amdgpu_dm_connector_init_helper(
8849                 dm,
8850                 aconnector,
8851                 connector_type,
8852                 link,
8853                 link_index);
8854
8855         drm_connector_attach_encoder(
8856                 &aconnector->base, &aencoder->base);
8857
8858         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8859                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8860                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8861
8862 out_free:
8863         if (res) {
8864                 kfree(i2c);
8865                 aconnector->i2c = NULL;
8866         }
8867         return res;
8868 }
8869
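/*
 * Build a possible_crtcs bitmask with one bit per available CRTC:
 * effectively (1 << num_crtc) - 1, capped at six CRTCs.
 */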
8870 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8871 {
8872         switch (adev->mode_info.num_crtc) {
8873         case 1:
8874                 return 0x1;
8875         case 2:
8876                 return 0x3;
8877         case 3:
8878                 return 0x7;
8879         case 4:
8880                 return 0xf;
8881         case 5:
8882                 return 0x1f;
8883         case 6:
8884         default:
8885                 return 0x3f;
8886         }
8887 }
8888
8889 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8890                                   struct amdgpu_encoder *aencoder,
8891                                   uint32_t link_index)
8892 {
8893         struct amdgpu_device *adev = drm_to_adev(dev);
8894
8895         int res = drm_encoder_init(dev,
8896                                    &aencoder->base,
8897                                    &amdgpu_dm_encoder_funcs,
8898                                    DRM_MODE_ENCODER_TMDS,
8899                                    NULL);
8900
8901         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8902
8903         if (!res)
8904                 aencoder->encoder_id = link_index;
8905         else
8906                 aencoder->encoder_id = -1;
8907
8908         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8909
8910         return res;
8911 }
8912
8913 static void manage_dm_interrupts(struct amdgpu_device *adev,
8914                                  struct amdgpu_crtc *acrtc,
8915                                  bool enable)
8916 {
8917         /*
8918          * We have no guarantee that the frontend index maps to the same
8919          * backend index - some even map to more than one.
8920          *
8921          * TODO: Use a different interrupt or check DC itself for the mapping.
8922          */
8923         int irq_type =
8924                 amdgpu_display_crtc_idx_to_irq_type(
8925                         adev,
8926                         acrtc->crtc_id);
8927
8928         if (enable) {
8929                 drm_crtc_vblank_on(&acrtc->base);
8930                 amdgpu_irq_get(
8931                         adev,
8932                         &adev->pageflip_irq,
8933                         irq_type);
8934 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8935                 amdgpu_irq_get(
8936                         adev,
8937                         &adev->vline0_irq,
8938                         irq_type);
8939 #endif
8940         } else {
8941 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8942                 amdgpu_irq_put(
8943                         adev,
8944                         &adev->vline0_irq,
8945                         irq_type);
8946 #endif
8947                 amdgpu_irq_put(
8948                         adev,
8949                         &adev->pageflip_irq,
8950                         irq_type);
8951                 drm_crtc_vblank_off(&acrtc->base);
8952         }
8953 }
8954
8955 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8956                                       struct amdgpu_crtc *acrtc)
8957 {
8958         int irq_type =
8959                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8960
8961         /*
8962          * Read the current IRQ state and force-reapply the setting
8963          * to hardware.
8964          */
8965         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8966 }
8967
8968 static bool
8969 is_scaling_state_different(const struct dm_connector_state *dm_state,
8970                            const struct dm_connector_state *old_dm_state)
8971 {
8972         if (dm_state->scaling != old_dm_state->scaling)
8973                 return true;
8974         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8975                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8976                         return true;
8977         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8978                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8979                         return true;
8980         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8981                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8982                 return true;
8983         return false;
8984 }
8985
8986 #ifdef CONFIG_DRM_AMD_DC_HDCP
8987 static bool is_content_protection_different(struct drm_connector_state *state,
8988                                             const struct drm_connector_state *old_state,
8989                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8990 {
8991         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8992         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8993
8994         /* Handle: Type0/1 change */
8995         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8996             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8997                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8998                 return true;
8999         }
9000
9001         /* CP is being re-enabled, ignore this
9002          *
9003          * Handles:     ENABLED -> DESIRED
9004          */
9005         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
9006             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9007                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
9008                 return false;
9009         }
9010
9011         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
9012          *
9013          * Handles:     UNDESIRED -> ENABLED
9014          */
9015         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
9016             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
9017                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9018
9019         /* Stream removed and re-enabled
9020          *
9021          * Can sometimes overlap with the HPD case,
9022          * thus set update_hdcp to false to avoid
9023          * setting HDCP multiple times.
9024          *
9025          * Handles:     DESIRED -> DESIRED (Special case)
9026          */
9027         if (!(old_state->crtc && old_state->crtc->enabled) &&
9028                 state->crtc && state->crtc->enabled &&
9029                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9030                 dm_con_state->update_hdcp = false;
9031                 return true;
9032         }
9033
9034         /* Hot-plug, headless s3, dpms
9035          *
9036          * Only start HDCP if the display is connected/enabled.
9037          * update_hdcp flag will be set to false until the next
9038          * HPD comes in.
9039          *
9040          * Handles:     DESIRED -> DESIRED (Special case)
9041          */
9042         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
9043             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
9044                 dm_con_state->update_hdcp = false;
9045                 return true;
9046         }
9047
9048         /*
9049          * Handles:     UNDESIRED -> UNDESIRED
9050          *              DESIRED -> DESIRED
9051          *              ENABLED -> ENABLED
9052          */
9053         if (old_state->content_protection == state->content_protection)
9054                 return false;
9055
9056         /*
9057          * Handles:     UNDESIRED -> DESIRED
9058          *              DESIRED -> UNDESIRED
9059          *              ENABLED -> UNDESIRED
9060          */
9061         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
9062                 return true;
9063
9064         /*
9065          * Handles:     DESIRED -> ENABLED
9066          */
9067         return false;
9068 }
9069
9070 #endif
9071 static void remove_stream(struct amdgpu_device *adev,
9072                           struct amdgpu_crtc *acrtc,
9073                           struct dc_stream_state *stream)
9074 {
9075         /* this is the update mode case */
9076
9077         acrtc->otg_inst = -1;
9078         acrtc->enabled = false;
9079 }
9080
9081 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
9082                                struct dc_cursor_position *position)
9083 {
9084         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9085         int x, y;
9086         int xorigin = 0, yorigin = 0;
9087
9088         if (!crtc || !plane->state->fb)
9089                 return 0;
9090
9091         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
9092             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
9093                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
9094                           __func__,
9095                           plane->state->crtc_w,
9096                           plane->state->crtc_h);
9097                 return -EINVAL;
9098         }
9099
9100         x = plane->state->crtc_x;
9101         y = plane->state->crtc_y;
9102
9103         if (x <= -amdgpu_crtc->max_cursor_width ||
9104             y <= -amdgpu_crtc->max_cursor_height)
9105                 return 0;
9106
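        /*
         * A cursor partially off the top/left edge can't be programmed with
         * a negative position, so clamp to 0 and shift the hotspot by the
         * clamped amount instead.
         */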
9107         if (x < 0) {
9108                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
9109                 x = 0;
9110         }
9111         if (y < 0) {
9112                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
9113                 y = 0;
9114         }
9115         position->enable = true;
9116         position->translate_by_source = true;
9117         position->x = x;
9118         position->y = y;
9119         position->x_hotspot = xorigin;
9120         position->y_hotspot = yorigin;
9121
9122         return 0;
9123 }
9124
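/*
 * Program the cursor attributes (address, size, pitch) and position on the
 * DC stream of the CRTC the cursor plane is, or was, attached to.
 */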
9125 static void handle_cursor_update(struct drm_plane *plane,
9126                                  struct drm_plane_state *old_plane_state)
9127 {
9128         struct amdgpu_device *adev = drm_to_adev(plane->dev);
9129         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9130         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9131         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9132         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9133         uint64_t address = afb ? afb->address : 0;
9134         struct dc_cursor_position position = {0};
9135         struct dc_cursor_attributes attributes;
9136         int ret;
9137
9138         if (!plane->state->fb && !old_plane_state->fb)
9139                 return;
9140
9141         DC_LOG_CURSOR("%s: crtc_id=%d with size %d x %d\n",
9142                       __func__,
9143                       amdgpu_crtc->crtc_id,
9144                       plane->state->crtc_w,
9145                       plane->state->crtc_h);
9146
9147         ret = get_cursor_position(plane, crtc, &position);
9148         if (ret)
9149                 return;
9150
9151         if (!position.enable) {
9152                 /* turn off cursor */
9153                 if (crtc_state && crtc_state->stream) {
9154                         mutex_lock(&adev->dm.dc_lock);
9155                         dc_stream_set_cursor_position(crtc_state->stream,
9156                                                       &position);
9157                         mutex_unlock(&adev->dm.dc_lock);
9158                 }
9159                 return;
9160         }
9161
9162         amdgpu_crtc->cursor_width = plane->state->crtc_w;
9163         amdgpu_crtc->cursor_height = plane->state->crtc_h;
9164
9165         memset(&attributes, 0, sizeof(attributes));
9166         attributes.address.high_part = upper_32_bits(address);
9167         attributes.address.low_part  = lower_32_bits(address);
9168         attributes.width             = plane->state->crtc_w;
9169         attributes.height            = plane->state->crtc_h;
9170         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9171         attributes.rotation_angle    = 0;
9172         attributes.attribute_flags.value = 0;
9173
9174         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
9175
9176         if (crtc_state->stream) {
9177                 mutex_lock(&adev->dm.dc_lock);
9178                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9179                                                          &attributes))
9180                         DRM_ERROR("DC failed to set cursor attributes\n");
9181
9182                 if (!dc_stream_set_cursor_position(crtc_state->stream,
9183                                                    &position))
9184                         DRM_ERROR("DC failed to set cursor position\n");
9185                 mutex_unlock(&adev->dm.dc_lock);
9186         }
9187 }
9188
9189 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9190 {
9192         assert_spin_locked(&acrtc->base.dev->event_lock);
9193         WARN_ON(acrtc->event);
9194
9195         acrtc->event = acrtc->base.state->event;
9196
9197         /* Set the flip status */
9198         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9199
9200         /* Mark this event as consumed */
9201         acrtc->base.state->event = NULL;
9202
9203         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9204                      acrtc->crtc_id);
9205 }
9206
9207 static void update_freesync_state_on_stream(
9208         struct amdgpu_display_manager *dm,
9209         struct dm_crtc_state *new_crtc_state,
9210         struct dc_stream_state *new_stream,
9211         struct dc_plane_state *surface,
9212         u32 flip_timestamp_in_us)
9213 {
9214         struct mod_vrr_params vrr_params;
9215         struct dc_info_packet vrr_infopacket = {0};
9216         struct amdgpu_device *adev = dm->adev;
9217         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9218         unsigned long flags;
9219         bool pack_sdp_v1_3 = false;
9220
9221         if (!new_stream)
9222                 return;
9223
9224         /*
9225          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9226          * For now it's sufficient to just guard against these conditions.
9227          */
9228
9229         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9230                 return;
9231
9232         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9233         vrr_params = acrtc->dm_irq_params.vrr_params;
9234
9235         if (surface) {
9236                 mod_freesync_handle_preflip(
9237                         dm->freesync_module,
9238                         surface,
9239                         new_stream,
9240                         flip_timestamp_in_us,
9241                         &vrr_params);
9242
9243                 if (adev->family < AMDGPU_FAMILY_AI &&
9244                     amdgpu_dm_vrr_active(new_crtc_state)) {
9245                         mod_freesync_handle_v_update(dm->freesync_module,
9246                                                      new_stream, &vrr_params);
9247
9248                         /* Need to call this before the frame ends. */
9249                         dc_stream_adjust_vmin_vmax(dm->dc,
9250                                                    new_crtc_state->stream,
9251                                                    &vrr_params.adjust);
9252                 }
9253         }
9254
9255         mod_freesync_build_vrr_infopacket(
9256                 dm->freesync_module,
9257                 new_stream,
9258                 &vrr_params,
9259                 PACKET_TYPE_VRR,
9260                 TRANSFER_FUNC_UNKNOWN,
9261                 &vrr_infopacket,
9262                 pack_sdp_v1_3);
9263
9264         new_crtc_state->freesync_timing_changed |=
9265                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9266                         &vrr_params.adjust,
9267                         sizeof(vrr_params.adjust)) != 0);
9268
9269         new_crtc_state->freesync_vrr_info_changed |=
9270                 (memcmp(&new_crtc_state->vrr_infopacket,
9271                         &vrr_infopacket,
9272                         sizeof(vrr_infopacket)) != 0);
9273
9274         acrtc->dm_irq_params.vrr_params = vrr_params;
9275         new_crtc_state->vrr_infopacket = vrr_infopacket;
9276
9277         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9278         new_stream->vrr_infopacket = vrr_infopacket;
9279
9280         if (new_crtc_state->freesync_vrr_info_changed)
9281                               DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
9282                               new_crtc_state->base.crtc->base.id,
9283                               (int)new_crtc_state->base.vrr_enabled,
9284                               (int)vrr_params.state);
9285
9286         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9287 }
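
/*
 * A minimal sketch of the memcmp()-based change detection above
 * (hypothetical helper, not part of this driver). A byte-wise compare
 * is a conservative test: it can flag padding-only differences but
 * never misses a field change, which is the safe direction here.
 */
#if 0
static bool timing_adjust_changed(const struct dc_crtc_timing_adjust *old,
                                  const struct dc_crtc_timing_adjust *new)
{
        return memcmp(old, new, sizeof(*old)) != 0;
}
#endif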
9288
9289 static void update_stream_irq_parameters(
9290         struct amdgpu_display_manager *dm,
9291         struct dm_crtc_state *new_crtc_state)
9292 {
9293         struct dc_stream_state *new_stream = new_crtc_state->stream;
9294         struct mod_vrr_params vrr_params;
9295         struct mod_freesync_config config = new_crtc_state->freesync_config;
9296         struct amdgpu_device *adev = dm->adev;
9297         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9298         unsigned long flags;
9299
9300         if (!new_stream)
9301                 return;
9302
9303         /*
9304          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9305          * For now it's sufficient to just guard against these conditions.
9306          */
9307         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9308                 return;
9309
9310         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9311         vrr_params = acrtc->dm_irq_params.vrr_params;
9312
9313         if (new_crtc_state->vrr_supported &&
9314             config.min_refresh_in_uhz &&
9315             config.max_refresh_in_uhz) {
9316                 /*
9317                  * if freesync compatible mode was set, config.state will be set
9318                  * in atomic check
9319                  */
9320                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9321                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9322                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9323                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9324                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9325                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9326                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9327                 } else {
9328                         config.state = new_crtc_state->base.vrr_enabled ?
9329                                                      VRR_STATE_ACTIVE_VARIABLE :
9330                                                      VRR_STATE_INACTIVE;
9331                 }
9332         } else {
9333                 config.state = VRR_STATE_UNSUPPORTED;
9334         }
9335
9336         mod_freesync_build_vrr_params(dm->freesync_module,
9337                                       new_stream,
9338                                       &config, &vrr_params);
9339
9340         new_crtc_state->freesync_timing_changed |=
9341                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9342                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9343
9344         new_crtc_state->freesync_config = config;
9345         /* Copy state for access from DM IRQ handler */
9346         acrtc->dm_irq_params.freesync_config = config;
9347         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9348         acrtc->dm_irq_params.vrr_params = vrr_params;
9349         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9350 }
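
/*
 * The VRR state selection above reads as a pure decision function.
 * A simplified model (hypothetical helper, fixed-rate handling and
 * the modeset check omitted):
 */
#if 0
static enum mod_vrr_state pick_vrr_state(bool supported, bool enabled,
                                         unsigned int min_refresh_in_uhz,
                                         unsigned int max_refresh_in_uhz)
{
        if (!supported || !min_refresh_in_uhz || !max_refresh_in_uhz)
                return VRR_STATE_UNSUPPORTED;
        return enabled ? VRR_STATE_ACTIVE_VARIABLE : VRR_STATE_INACTIVE;
}
#endif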
9351
9352 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9353                                             struct dm_crtc_state *new_state)
9354 {
9355         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9356         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9357
9358         if (!old_vrr_active && new_vrr_active) {
9359                 /* Transition VRR inactive -> active:
9360                  * While VRR is active, we must not disable vblank irq, as a
9361                  * re-enable after a disable can compute bogus vblank/pflip
9362                  * timestamps if the re-enable happens inside the display front porch.
9363                  *
9364                  * We also need vupdate irq for the actual core vblank handling
9365                  * at end of vblank.
9366                  */
9367                 dm_set_vupdate_irq(new_state->base.crtc, true);
9368                 drm_crtc_vblank_get(new_state->base.crtc);
9369                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9370                                  __func__, new_state->base.crtc->base.id);
9371         } else if (old_vrr_active && !new_vrr_active) {
9372                 /* Transition VRR active -> inactive:
9373                  * Allow vblank irq disable again for fixed refresh rate.
9374                  */
9375                 dm_set_vupdate_irq(new_state->base.crtc, false);
9376                 drm_crtc_vblank_put(new_state->base.crtc);
9377                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9378                                  __func__, new_state->base.crtc->base.id);
9379         }
9380 }
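
/*
 * The get/put above is edge-triggered, so the vblank reference count
 * stays balanced across any sequence of commits. A standalone model
 * of that invariant (hypothetical, not driver code):
 */
#if 0
static int vblank_refs; /* models drm_crtc_vblank_get()/_put() pairing */

static void model_vrr_edge(bool was_active, bool is_active)
{
        if (!was_active && is_active)
                vblank_refs++;  /* off -> on: take one reference */
        else if (was_active && !is_active)
                vblank_refs--;  /* on -> off: drop that reference */
        /* No-op when the state is unchanged, so refs never drift. */
}
#endif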
9381
9382 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9383 {
9384         struct drm_plane *plane;
9385         struct drm_plane_state *old_plane_state;
9386         int i;
9387
9388         /*
9389          * TODO: Make this per-stream so we don't issue redundant updates for
9390          * commits with multiple streams.
9391          */
9392         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9393                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9394                         handle_cursor_update(plane, old_plane_state);
9395 }
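
/*
 * One possible shape of the per-stream filtering the TODO above asks
 * for (hypothetical and untested): key each cursor update to the CRTC
 * currently being committed.
 */
#if 0
static void commit_cursors_for_crtc(struct drm_atomic_state *state,
                                    struct drm_crtc *pcrtc)
{
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state;
        int i;

        for_each_old_plane_in_state(state, plane, old_plane_state, i)
                if (plane->type == DRM_PLANE_TYPE_CURSOR &&
                    old_plane_state->crtc == pcrtc)
                        handle_cursor_update(plane, old_plane_state);
}
#endif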
9396
9397 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9398                                     struct dc_state *dc_state,
9399                                     struct drm_device *dev,
9400                                     struct amdgpu_display_manager *dm,
9401                                     struct drm_crtc *pcrtc,
9402                                     bool wait_for_vblank)
9403 {
9404         uint32_t i;
9405         uint64_t timestamp_ns;
9406         struct drm_plane *plane;
9407         struct drm_plane_state *old_plane_state, *new_plane_state;
9408         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9409         struct drm_crtc_state *new_pcrtc_state =
9410                         drm_atomic_get_new_crtc_state(state, pcrtc);
9411         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9412         struct dm_crtc_state *dm_old_crtc_state =
9413                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9414         int planes_count = 0, vpos, hpos;
9415         long r;
9416         unsigned long flags;
9417         struct amdgpu_bo *abo;
9418         uint32_t target_vblank, last_flip_vblank;
9419         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9420         bool cursor_update = false;
9421         bool pflip_present = false;
9422         struct {
9423                 struct dc_surface_update surface_updates[MAX_SURFACES];
9424                 struct dc_plane_info plane_infos[MAX_SURFACES];
9425                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9426                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9427                 struct dc_stream_update stream_update;
9428         } *bundle;
9429
9430         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9431
9432         if (!bundle) {
9433                 dm_error("Failed to allocate update bundle\n");
9434                 goto cleanup;
9435         }
9436
9437         /*
9438          * Disable the cursor first if we're disabling all the planes.
9439          * It'll remain on the screen after the planes are re-enabled
9440          * if we don't.
9441          */
9442         if (acrtc_state->active_planes == 0)
9443                 amdgpu_dm_commit_cursors(state);
9444
9445         /* update planes when needed */
9446         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9447                 struct drm_crtc *crtc = new_plane_state->crtc;
9448                 struct drm_crtc_state *new_crtc_state;
9449                 struct drm_framebuffer *fb = new_plane_state->fb;
9450                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9451                 bool plane_needs_flip;
9452                 struct dc_plane_state *dc_plane;
9453                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9454
9455                 /* Cursor plane is handled after stream updates */
9456                 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9457                         if ((fb && crtc == pcrtc) ||
9458                             (old_plane_state->fb && old_plane_state->crtc == pcrtc))
9459                                 cursor_update = true;
9460
9461                         continue;
9462                 }
9463
9464                 if (!fb || !crtc || pcrtc != crtc)
9465                         continue;
9466
9467                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9468                 if (!new_crtc_state->active)
9469                         continue;
9470
9471                 dc_plane = dm_new_plane_state->dc_state;
9472
9473                 bundle->surface_updates[planes_count].surface = dc_plane;
9474                 if (new_pcrtc_state->color_mgmt_changed) {
9475                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9476                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9477                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9478                 }
9479
9480                 fill_dc_scaling_info(dm->adev, new_plane_state,
9481                                      &bundle->scaling_infos[planes_count]);
9482
9483                 bundle->surface_updates[planes_count].scaling_info =
9484                         &bundle->scaling_infos[planes_count];
9485
9486                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9487
9488                 pflip_present = pflip_present || plane_needs_flip;
9489
9490                 if (!plane_needs_flip) {
9491                         planes_count += 1;
9492                         continue;
9493                 }
9494
9495                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9496
9497                 /*
9498                  * Wait for all fences on this FB. Do a limited wait to avoid
9499                  * deadlock during GPU reset when the fence might never signal
9500                  * while we hold the reservation lock for the BO.
9501                  */
9502                 r = dma_resv_wait_timeout(abo->tbo.base.resv,
9503                                           DMA_RESV_USAGE_WRITE, false,
9504                                           msecs_to_jiffies(5000));
9505                 if (unlikely(r <= 0))
9506                         DRM_ERROR("Waiting for fences timed out!\n");
9507
9508                 fill_dc_plane_info_and_addr(
9509                         dm->adev, new_plane_state,
9510                         afb->tiling_flags,
9511                         &bundle->plane_infos[planes_count],
9512                         &bundle->flip_addrs[planes_count].address,
9513                         afb->tmz_surface, false);
9514
9515                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9516                                  new_plane_state->plane->index,
9517                                  bundle->plane_infos[planes_count].dcc.enable);
9518
9519                 bundle->surface_updates[planes_count].plane_info =
9520                         &bundle->plane_infos[planes_count];
9521
9522                 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
9523                                     new_crtc_state,
9524                                     &bundle->flip_addrs[planes_count]);
9525
9526                 /*
9527                  * Only allow immediate flips for fast updates that don't
9528                  * change FB pitch, DCC state, rotation or mirroring.
9529                  */
9530                 bundle->flip_addrs[planes_count].flip_immediate =
9531                         crtc->state->async_flip &&
9532                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9533
9534                 timestamp_ns = ktime_get_ns();
9535                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9536                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9537                 bundle->surface_updates[planes_count].surface = dc_plane;
9538
9539                 if (!bundle->surface_updates[planes_count].surface) {
9540                         DRM_ERROR("No surface for CRTC: id=%d\n",
9541                                         acrtc_attach->crtc_id);
9542                         continue;
9543                 }
9544
9545                 if (plane == pcrtc->primary)
9546                         update_freesync_state_on_stream(
9547                                 dm,
9548                                 acrtc_state,
9549                                 acrtc_state->stream,
9550                                 dc_plane,
9551                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9552
9553                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9554                                  __func__,
9555                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9556                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9557
9558                 planes_count += 1;
9559
9560         }
9561
9562         if (pflip_present) {
9563                 if (!vrr_active) {
9564                         /* Use old throttling in non-vrr fixed refresh rate mode
9565                          * to keep flip scheduling based on target vblank counts
9566                          * working in a backwards compatible way, e.g., for
9567                          * clients using the GLX_OML_sync_control extension or
9568                          * DRI3/Present extension with defined target_msc.
9569                          */
9570                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9571                 } else {
9573                         /* For variable refresh rate mode only:
9574                          * Get vblank of last completed flip to avoid > 1 vrr
9575                          * flips per video frame by use of throttling, but allow
9576                          * flip programming anywhere in the possibly large
9577                          * variable vrr vblank interval for fine-grained flip
9578                          * timing control and more opportunity to avoid stutter
9579                          * on late submission of flips.
9580                          */
9581                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9582                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9583                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9584                 }
9585
9586                 target_vblank = last_flip_vblank + wait_for_vblank;
9587
9588                 /*
9589                  * Wait until we're out of the vertical blank period before the one
9590                  * targeted by the flip
9591                  */
9592                 while ((acrtc_attach->enabled &&
9593                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9594                                                             0, &vpos, &hpos, NULL,
9595                                                             NULL, &pcrtc->hwmode)
9596                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9597                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9598                         (int)(target_vblank -
9599                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9600                         usleep_range(1000, 1100);
9601                 }
9602
9603                 /*
9604                  * Prepare the flip event for the pageflip interrupt to handle.
9605                  *
9606                  * This only works in the case where we've already turned on the
9607                  * appropriate hardware blocks (e.g. HUBP), so in the transition
9608                  * from 0 -> n planes we have to skip a hardware-generated event
9609                  * and rely on sending it from software.
9610                  */
9611                 if (acrtc_attach->base.state->event &&
9612                     acrtc_state->active_planes > 0) {
9613                         drm_crtc_vblank_get(pcrtc);
9614
9615                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9616
9617                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9618                         prepare_flip_isr(acrtc_attach);
9619
9620                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9621                 }
9622
9623                 if (acrtc_state->stream) {
9624                         if (acrtc_state->freesync_vrr_info_changed)
9625                                 bundle->stream_update.vrr_infopacket =
9626                                         &acrtc_state->stream->vrr_infopacket;
9627                 }
9628         } else if (cursor_update && acrtc_state->active_planes > 0 &&
9629                    acrtc_attach->base.state->event) {
9630                 drm_crtc_vblank_get(pcrtc);
9631
9632                 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9633
9634                 acrtc_attach->event = acrtc_attach->base.state->event;
9635                 acrtc_attach->base.state->event = NULL;
9636
9637                 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9638         }
9639
9640         /* Update the planes if changed or disable if we don't have any. */
9641         if ((planes_count || acrtc_state->active_planes == 0) &&
9642                 acrtc_state->stream) {
9643                 /*
9644                  * If PSR or idle optimizations are enabled then flush out
9645                  * any pending work before hardware programming.
9646                  */
9647                 if (dm->vblank_control_workqueue)
9648                         flush_workqueue(dm->vblank_control_workqueue);
9649
9650                 bundle->stream_update.stream = acrtc_state->stream;
9651                 if (new_pcrtc_state->mode_changed) {
9652                         bundle->stream_update.src = acrtc_state->stream->src;
9653                         bundle->stream_update.dst = acrtc_state->stream->dst;
9654                 }
9655
9656                 if (new_pcrtc_state->color_mgmt_changed) {
9657                         /*
9658                          * TODO: This isn't fully correct since we've actually
9659                          * already modified the stream in place.
9660                          */
9661                         bundle->stream_update.gamut_remap =
9662                                 &acrtc_state->stream->gamut_remap_matrix;
9663                         bundle->stream_update.output_csc_transform =
9664                                 &acrtc_state->stream->csc_color_matrix;
9665                         bundle->stream_update.out_transfer_func =
9666                                 acrtc_state->stream->out_transfer_func;
9667                 }
9668
9669                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9670                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9671                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9672
9673                 /*
9674                  * If FreeSync state on the stream has changed then we need to
9675                  * re-adjust the min/max bounds now that DC doesn't handle this
9676                  * as part of commit.
9677                  */
9678                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9679                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9680                         dc_stream_adjust_vmin_vmax(
9681                                 dm->dc, acrtc_state->stream,
9682                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9683                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9684                 }
9685                 mutex_lock(&dm->dc_lock);
9686                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9687                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9688                         amdgpu_dm_psr_disable(acrtc_state->stream);
9689
9690                 dc_commit_updates_for_stream(dm->dc,
9691                                                      bundle->surface_updates,
9692                                                      planes_count,
9693                                                      acrtc_state->stream,
9694                                                      &bundle->stream_update,
9695                                                      dc_state);
9696
9697                 /*
9698                  * Enable or disable the interrupts on the backend.
9699                  *
9700                  * Most pipes are put into power gating when unused.
9701                  *
9702                  * When a pipe is power gated we lose its interrupt
9703                  * enablement state by the time power gating is disabled.
9704                  *
9705                  * So we need to update the IRQ control state in hardware
9706                  * whenever the pipe turns on (since it could have been
9707                  * previously power gated) or off (since some pipes can't
9708                  * be power gated on some ASICs).
9709                  */
9710                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9711                         dm_update_pflip_irq_state(drm_to_adev(dev),
9712                                                   acrtc_attach);
9713
9714                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9715                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9716                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9717                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9718
9719                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9720                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9721                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9722                         struct amdgpu_dm_connector *aconn =
9723                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9724
9725                         if (aconn->psr_skip_count > 0)
9726                                 aconn->psr_skip_count--;
9727
9728                         /* Allow PSR when skip count is 0. */
9729                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9730
9731                         /*
9732                          * If sink supports PSR SU, there is no need to rely on
9733                          * a vblank event disable request to enable PSR. PSR SU
9734                          * can be enabled immediately once OS demonstrates an
9735                          * adequate number of fast atomic commits to notify KMD
9736                          * of update events. See `vblank_control_worker()`.
9737                          */
9738                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9739                             acrtc_attach->dm_irq_params.allow_psr_entry &&
9740                             !acrtc_state->stream->link->psr_settings.psr_allow_active)
9741                                 amdgpu_dm_psr_enable(acrtc_state->stream);
9742                 } else {
9743                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9744                 }
9745
9746                 mutex_unlock(&dm->dc_lock);
9747         }
9748
9749         /*
9750          * Update cursor state *after* programming all the planes.
9751          * This avoids redundant programming in the case where we're going
9752          * to be disabling a single plane - those pipes are being disabled.
9753          */
9754         if (acrtc_state->active_planes)
9755                 amdgpu_dm_commit_cursors(state);
9756
9757 cleanup:
9758         kfree(bundle);
9759 }
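
/*
 * The throttling loop above relies on a wraparound-safe comparison of
 * 32-bit vblank counters: casting the difference to a signed int keeps
 * the test correct across counter wraparound as long as the two counts
 * are less than 2^31 apart. A minimal sketch (hypothetical helper):
 */
#if 0
static bool target_vblank_not_reached(u32 target_vblank, u32 current_vblank)
{
        return (int)(target_vblank - current_vblank) > 0;
}
#endif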
9760
9761 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9762                                    struct drm_atomic_state *state)
9763 {
9764         struct amdgpu_device *adev = drm_to_adev(dev);
9765         struct amdgpu_dm_connector *aconnector;
9766         struct drm_connector *connector;
9767         struct drm_connector_state *old_con_state, *new_con_state;
9768         struct drm_crtc_state *new_crtc_state;
9769         struct dm_crtc_state *new_dm_crtc_state;
9770         const struct dc_stream_status *status;
9771         int i, inst;
9772
9773         /* Notify device removals. */
9774         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9775                 if (old_con_state->crtc != new_con_state->crtc) {
9776                         /* CRTC changes require notification. */
9777                         goto notify;
9778                 }
9779
9780                 if (!new_con_state->crtc)
9781                         continue;
9782
9783                 new_crtc_state = drm_atomic_get_new_crtc_state(
9784                         state, new_con_state->crtc);
9785
9786                 if (!new_crtc_state)
9787                         continue;
9788
9789                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9790                         continue;
9791
9792         notify:
9793                 aconnector = to_amdgpu_dm_connector(connector);
9794
9795                 mutex_lock(&adev->dm.audio_lock);
9796                 inst = aconnector->audio_inst;
9797                 aconnector->audio_inst = -1;
9798                 mutex_unlock(&adev->dm.audio_lock);
9799
9800                 amdgpu_dm_audio_eld_notify(adev, inst);
9801         }
9802
9803         /* Notify audio device additions. */
9804         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9805                 if (!new_con_state->crtc)
9806                         continue;
9807
9808                 new_crtc_state = drm_atomic_get_new_crtc_state(
9809                         state, new_con_state->crtc);
9810
9811                 if (!new_crtc_state)
9812                         continue;
9813
9814                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9815                         continue;
9816
9817                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9818                 if (!new_dm_crtc_state->stream)
9819                         continue;
9820
9821                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9822                 if (!status)
9823                         continue;
9824
9825                 aconnector = to_amdgpu_dm_connector(connector);
9826
9827                 mutex_lock(&adev->dm.audio_lock);
9828                 inst = status->audio_inst;
9829                 aconnector->audio_inst = inst;
9830                 mutex_unlock(&adev->dm.audio_lock);
9831
9832                 amdgpu_dm_audio_eld_notify(adev, inst);
9833         }
9834 }
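
/*
 * The two passes above implement a release-then-bind ordering for
 * audio instances. A condensed model (hypothetical, not driver code),
 * assuming an instance can move between connectors in one commit:
 */
#if 0
static void model_audio_handoff(int *old_owner_inst, int *new_owner_inst,
                                int inst)
{
        *old_owner_inst = -1;   /* pass 1: release on removal/CRTC change */
        *new_owner_inst = inst; /* pass 2: bind after the modeset */
        /* Reversing the passes could briefly leave two owners for the
         * same audio instance during the ELD notification. */
}
#endif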
9835
9836 /*
9837  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9838  * @crtc_state: the DRM CRTC state
9839  * @stream_state: the DC stream state.
9840  *
9841  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9842  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9843  */
9844 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9845                                                 struct dc_stream_state *stream_state)
9846 {
9847         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9848 }
9849
9850 /**
9851  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9852  * @state: The atomic state to commit
9853  *
9854  * This will tell DC to commit the constructed DC state from atomic_check,
9855  * programming the hardware. Any failure here implies a hardware failure, since
9856  * atomic check should have filtered out anything non-kosher.
9857  */
9858 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9859 {
9860         struct drm_device *dev = state->dev;
9861         struct amdgpu_device *adev = drm_to_adev(dev);
9862         struct amdgpu_display_manager *dm = &adev->dm;
9863         struct dm_atomic_state *dm_state;
9864         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9865         uint32_t i, j;
9866         struct drm_crtc *crtc;
9867         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9868         unsigned long flags;
9869         bool wait_for_vblank = true;
9870         struct drm_connector *connector;
9871         struct drm_connector_state *old_con_state, *new_con_state;
9872         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9873         int crtc_disable_count = 0;
9874         bool mode_set_reset_required = false;
9875
9876         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9877
9878         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9879
9880         dm_state = dm_atomic_get_new_state(state);
9881         if (dm_state && dm_state->context) {
9882                 dc_state = dm_state->context;
9883         } else {
9884                 /* No state changes, retain current state. */
9885                 dc_state_temp = dc_create_state(dm->dc);
9886                 ASSERT(dc_state_temp);
9887                 dc_state = dc_state_temp;
9888                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9889         }
9890
9891         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9892                                        new_crtc_state, i) {
9893                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9894
9895                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9896
9897                 if (old_crtc_state->active &&
9898                     (!new_crtc_state->active ||
9899                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9900                         manage_dm_interrupts(adev, acrtc, false);
9901                         dc_stream_release(dm_old_crtc_state->stream);
9902                 }
9903         }
9904
9905         drm_atomic_helper_calc_timestamping_constants(state);
9906
9907         /* update changed items */
9908         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9909                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9910
9911                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9912                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9913
9914                 drm_dbg_state(state->dev,
9915                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9916                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9917                         "connectors_changed:%d\n",
9918                         acrtc->crtc_id,
9919                         new_crtc_state->enable,
9920                         new_crtc_state->active,
9921                         new_crtc_state->planes_changed,
9922                         new_crtc_state->mode_changed,
9923                         new_crtc_state->active_changed,
9924                         new_crtc_state->connectors_changed);
9925
9926                 /* Disable cursor if disabling crtc */
9927                 if (old_crtc_state->active && !new_crtc_state->active) {
9928                         struct dc_cursor_position position;
9929
9930                         memset(&position, 0, sizeof(position));
9931                         mutex_lock(&dm->dc_lock);
9932                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9933                         mutex_unlock(&dm->dc_lock);
9934                 }
9935
9936                 /* Copy all transient state flags into dc state */
9937                 if (dm_new_crtc_state->stream) {
9938                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9939                                                             dm_new_crtc_state->stream);
9940                 }
9941
9942                 /* Handles the headless hotplug case, updating new_state and
9943                  * aconnector as needed.
9944                  */
9945
9946                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9948                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9949
9950                         if (!dm_new_crtc_state->stream) {
9951                                 /*
9952                                  * This could happen because of issues with
9953                                  * delivery of userspace notifications.
9954                                  * In this case userspace tries to set a mode
9955                                  * on a display that is in fact disconnected;
9956                                  * dc_sink is NULL on the aconnector here.
9957                                  * We expect a mode reset to come soon.
9958                                  *
9959                                  * This can also happen when an unplug occurs
9960                                  * while the resume sequence is still running.
9961                                  *
9962                                  * In either case, we want to pretend we still
9963                                  * have a sink to keep the pipe running so that
9964                                  * hw state stays consistent with the sw state.
9965                                  */
9966                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9967                                                 __func__, acrtc->base.base.id);
9968                                 continue;
9969                         }
9970
9971                         if (dm_old_crtc_state->stream)
9972                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9973
9974                         pm_runtime_get_noresume(dev->dev);
9975
9976                         acrtc->enabled = true;
9977                         acrtc->hw_mode = new_crtc_state->mode;
9978                         crtc->hwmode = new_crtc_state->mode;
9979                         mode_set_reset_required = true;
9980                 } else if (modereset_required(new_crtc_state)) {
9981                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9982                         /* i.e. reset mode */
9983                         if (dm_old_crtc_state->stream)
9984                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9985
9986                         mode_set_reset_required = true;
9987                 }
9988         } /* for_each_crtc_in_state() */
9989
9990         if (dc_state) {
9991                 /* If there is a mode set or reset, disable eDP PSR. */
9992                 if (mode_set_reset_required) {
9993                         if (dm->vblank_control_workqueue)
9994                                 flush_workqueue(dm->vblank_control_workqueue);
9995
9996                         amdgpu_dm_psr_disable_all(dm);
9997                 }
9998
9999                 dm_enable_per_frame_crtc_master_sync(dc_state);
10000                 mutex_lock(&dm->dc_lock);
10001                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
10002
10003                 /* Allow idle optimization when vblank count is 0 for display off */
10004                 if (dm->active_vblank_irq_count == 0)
10005                         dc_allow_idle_optimizations(dm->dc, true);
10006                 mutex_unlock(&dm->dc_lock);
10007         }
10008
10009         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10010                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10011
10012                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10013
10014                 if (dm_new_crtc_state->stream != NULL) {
10015                         const struct dc_stream_status *status =
10016                                         dc_stream_get_status(dm_new_crtc_state->stream);
10017
10018                         if (!status)
10019                                 status = dc_stream_get_status_from_state(dc_state,
10020                                                                          dm_new_crtc_state->stream);
10021                         if (!status)
10022                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
10023                         else
10024                                 acrtc->otg_inst = status->primary_otg_inst;
10025                 }
10026         }
10027 #ifdef CONFIG_DRM_AMD_DC_HDCP
10028         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10029                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10030                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10031                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10032
10033                 new_crtc_state = NULL;
10034
10035                 if (acrtc)
10036                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10037
10038                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10039
10040                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
10041                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
10042                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
10043                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
10044                         dm_new_con_state->update_hdcp = true;
10045                         continue;
10046                 }
10047
10048                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
10049                         hdcp_update_display(
10050                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
10051                                 new_con_state->hdcp_content_type,
10052                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
10053         }
10054 #endif
10055
10056         /* Handle connector state changes */
10057         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10058                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10059                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10060                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10061                 struct dc_surface_update dummy_updates[MAX_SURFACES];
10062                 struct dc_stream_update stream_update;
10063                 struct dc_info_packet hdr_packet;
10064                 struct dc_stream_status *status = NULL;
10065                 bool abm_changed, hdr_changed, scaling_changed;
10066
10067                 memset(&dummy_updates, 0, sizeof(dummy_updates));
10068                 memset(&stream_update, 0, sizeof(stream_update));
10069
10070                 if (acrtc) {
10071                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10072                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
10073                 }
10074
10075                 /* Skip any modesets/resets */
10076                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
10077                         continue;
10078
10079                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10080                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10081
10082                 scaling_changed = is_scaling_state_different(dm_new_con_state,
10083                                                              dm_old_con_state);
10084
10085                 abm_changed = dm_new_crtc_state->abm_level !=
10086                               dm_old_crtc_state->abm_level;
10087
10088                 hdr_changed =
10089                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
10090
10091                 if (!scaling_changed && !abm_changed && !hdr_changed)
10092                         continue;
10093
10094                 stream_update.stream = dm_new_crtc_state->stream;
10095                 if (scaling_changed) {
10096                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
10097                                         dm_new_con_state, dm_new_crtc_state->stream);
10098
10099                         stream_update.src = dm_new_crtc_state->stream->src;
10100                         stream_update.dst = dm_new_crtc_state->stream->dst;
10101                 }
10102
10103                 if (abm_changed) {
10104                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
10105
10106                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
10107                 }
10108
10109                 if (hdr_changed) {
10110                         fill_hdr_info_packet(new_con_state, &hdr_packet);
10111                         stream_update.hdr_static_metadata = &hdr_packet;
10112                 }
10113
10114                 status = dc_stream_get_status(dm_new_crtc_state->stream);
10115
10116                 if (WARN_ON(!status))
10117                         continue;
10118
10119                 WARN_ON(!status->plane_count);
10120
10121                 /*
10122                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
10123                  * Here we create an empty update on each plane.
10124                  * To fix this, DC should permit updating only stream properties.
10125                  */
10126                 for (j = 0; j < status->plane_count; j++)
10127                         dummy_updates[j].surface = status->plane_states[0];
10128
10130                 mutex_lock(&dm->dc_lock);
10131                 dc_commit_updates_for_stream(dm->dc,
10132                                                      dummy_updates,
10133                                                      status->plane_count,
10134                                                      dm_new_crtc_state->stream,
10135                                                      &stream_update,
10136                                                      dc_state);
10137                 mutex_unlock(&dm->dc_lock);
10138         }
10139
10140         /* Count number of newly disabled CRTCs for dropping PM refs later. */
10141         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10142                                       new_crtc_state, i) {
10143                 if (old_crtc_state->active && !new_crtc_state->active)
10144                         crtc_disable_count++;
10145
10146                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10147                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10148
10149                 /* Update the freesync config on the crtc state and the irq params */
10150                 update_stream_irq_parameters(dm, dm_new_crtc_state);
10151
10152                 /* Handle vrr on->off / off->on transitions */
10153                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10154                                                 dm_new_crtc_state);
10155         }
10156
10157         /*
10158          * Enable interrupts for CRTCs that are newly enabled or went through
10159          * a modeset. This is intentionally deferred until after the front-end
10160          * state has been modified, so that the OTG is already on and the IRQ
10161          * handlers don't access stale or invalid state.
10162          */
10163         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10164                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10165 #ifdef CONFIG_DEBUG_FS
10166                 bool configure_crc = false;
10167                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
10168 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10169                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10170 #endif
10171                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10172                 cur_crc_src = acrtc->dm_irq_params.crc_src;
10173                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10174 #endif
10175                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10176
10177                 if (new_crtc_state->active &&
10178                     (!old_crtc_state->active ||
10179                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10180                         dc_stream_retain(dm_new_crtc_state->stream);
10181                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10182                         manage_dm_interrupts(adev, acrtc, true);
10183
10184 #ifdef CONFIG_DEBUG_FS
10185                         /*
10186                          * The front end may have changed, so reapply the CRC
10187                          * capture settings for the stream.
10188                          */
10189                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10190
10191                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10192                                 configure_crc = true;
10193 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10194                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
10195                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10196                                         acrtc->dm_irq_params.crc_window.update_win = true;
10197                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10198                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10199                                         crc_rd_wrk->crtc = crtc;
10200                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10201                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10202                                 }
10203 #endif
10204                         }
10205
10206                         if (configure_crc)
10207                                 if (amdgpu_dm_crtc_configure_crc_source(
10208                                         crtc, dm_new_crtc_state, cur_crc_src))
10209                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
10210 #endif
10211                 }
10212         }
10213
10214         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10215                 if (new_crtc_state->async_flip)
10216                         wait_for_vblank = false;
10217
10218         /* update planes when needed per crtc*/
10219         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10220                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10221
10222                 if (dm_new_crtc_state->stream)
10223                         amdgpu_dm_commit_planes(state, dc_state, dev,
10224                                                 dm, crtc, wait_for_vblank);
10225         }
10226
10227         /* Update audio instances for each connector. */
10228         amdgpu_dm_commit_audio(dev, state);
10229
10230         /* restore the backlight level */
10231         for (i = 0; i < dm->num_of_edps; i++) {
10232                 if (dm->backlight_dev[i] &&
10233                     (dm->actual_brightness[i] != dm->brightness[i]))
10234                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10235         }
10236
10237         /*
10238          * Send a vblank event for each CRTC event not handled in the flip path,
10239          * and mark the events consumed for drm_atomic_helper_commit_hw_done().
10240          */
10241         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10242         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10243
10244                 if (new_crtc_state->event)
10245                         drm_send_event_locked(dev, &new_crtc_state->event->base);
10246
10247                 new_crtc_state->event = NULL;
10248         }
10249         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10250
10251         /* Signal HW programming completion */
10252         drm_atomic_helper_commit_hw_done(state);
10253
10254         if (wait_for_vblank)
10255                 drm_atomic_helper_wait_for_flip_done(dev, state);
10256
10257         drm_atomic_helper_cleanup_planes(dev, state);
10258
10259         /* Return the stolen VGA memory to VRAM */
10260         if (!adev->mman.keep_stolen_vga_memory)
10261                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10262         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10263
10264         /*
10265          * Finally, drop a runtime PM reference for each newly disabled CRTC,
10266          * so we can put the GPU into runtime suspend if we're not driving any
10267          * displays anymore
10268          */
10269         for (i = 0; i < crtc_disable_count; i++)
10270                 pm_runtime_put_autosuspend(dev->dev);
10271         pm_runtime_mark_last_busy(dev->dev);
10272
10273         if (dc_state_temp)
10274                 dc_release_state(dc_state_temp);
10275 }
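
/*
 * The runtime PM accounting at the end of the commit tail is a counted
 * put: one reference is dropped per CRTC this commit turned off. A
 * minimal sketch of that step (hypothetical helper):
 */
#if 0
static void drop_pm_refs_for_disabled_crtcs(struct device *dev, int count)
{
        int i;

        for (i = 0; i < count; i++)
                pm_runtime_put_autosuspend(dev);
        pm_runtime_mark_last_busy(dev);
}
#endif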
10276
10278 static int dm_force_atomic_commit(struct drm_connector *connector)
10279 {
10280         int ret = 0;
10281         struct drm_device *ddev = connector->dev;
10282         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10283         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10284         struct drm_plane *plane = disconnected_acrtc->base.primary;
10285         struct drm_connector_state *conn_state;
10286         struct drm_crtc_state *crtc_state;
10287         struct drm_plane_state *plane_state;
10288
10289         if (!state)
10290                 return -ENOMEM;
10291
10292         state->acquire_ctx = ddev->mode_config.acquire_ctx;
10293
10294         /* Construct an atomic state to restore the previous display setting */
10295
10296         /*
10297          * Attach connectors to drm_atomic_state
10298          */
10299         conn_state = drm_atomic_get_connector_state(state, connector);
10300
10301         ret = PTR_ERR_OR_ZERO(conn_state);
10302         if (ret)
10303                 goto out;
10304
10305         /* Attach crtc to drm_atomic_state */
10306         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10307
10308         ret = PTR_ERR_OR_ZERO(crtc_state);
10309         if (ret)
10310                 goto out;
10311
10312         /* force a restore */
10313         crtc_state->mode_changed = true;
10314
10315         /* Attach plane to drm_atomic_state */
10316         plane_state = drm_atomic_get_plane_state(state, plane);
10317
10318         ret = PTR_ERR_OR_ZERO(plane_state);
10319         if (ret)
10320                 goto out;
10321
10322         /* Call commit internally with the state we just constructed */
10323         ret = drm_atomic_commit(state);
10324
10325 out:
10326         drm_atomic_state_put(state);
10327         if (ret)
10328                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10329
10330         return ret;
10331 }
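
/*
 * The drm_atomic_get_*_state() helpers return ERR_PTR values, which
 * PTR_ERR_OR_ZERO() above collapses into an int in one step. The
 * equivalent open-coded form (hypothetical helper for illustration):
 */
#if 0
static int get_conn_state_checked(struct drm_atomic_state *state,
                                  struct drm_connector *connector,
                                  struct drm_connector_state **out)
{
        struct drm_connector_state *conn_state =
                drm_atomic_get_connector_state(state, connector);

        if (IS_ERR(conn_state))
                return PTR_ERR(conn_state); /* e.g. -EDEADLK or -ENOMEM */

        *out = conn_state;
        return 0;
}
#endif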
10332
10333 /*
10334  * This function handles all cases when a set mode does not come upon hotplug.
10335  * This includes when a display is unplugged and then plugged back into the
10336  * same port, and when running without usermode desktop manager support.
10337  */
10338 void dm_restore_drm_connector_state(struct drm_device *dev,
10339                                     struct drm_connector *connector)
10340 {
10341         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10342         struct amdgpu_crtc *disconnected_acrtc;
10343         struct dm_crtc_state *acrtc_state;
10344
10345         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10346                 return;
10347
10348         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10349         if (!disconnected_acrtc)
10350                 return;
10351
10352         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10353         if (!acrtc_state->stream)
10354                 return;
10355
10356         /*
10357          * If the previous sink is not released and differs from the current
10358          * one, we deduce we are in a state where we cannot rely on a usermode
10359          * call to turn on the display, so we do it here.
10360          */
10361         if (acrtc_state->stream->sink != aconnector->dc_sink)
10362                 dm_force_atomic_commit(&aconnector->base);
10363 }
10364
10365 /*
10366  * Grabs all modesetting locks to serialize against any blocking commits and
10367  * waits for the completion of all non-blocking commits.
10368  */
10369 static int do_aquire_global_lock(struct drm_device *dev,
10370                                  struct drm_atomic_state *state)
10371 {
10372         struct drm_crtc *crtc;
10373         struct drm_crtc_commit *commit;
10374         long ret;
10375
10376         /*
10377          * Adding all modeset locks to acquire_ctx will
10378          * ensure that when the framework releases it, the
10379          * extra locks we are taking here will get released too.
10380          */
10381         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10382         if (ret)
10383                 return ret;
10384
10385         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10386                 spin_lock(&crtc->commit_lock);
10387                 commit = list_first_entry_or_null(&crtc->commit_list,
10388                                 struct drm_crtc_commit, commit_entry);
10389                 if (commit)
10390                         drm_crtc_commit_get(commit);
10391                 spin_unlock(&crtc->commit_lock);
10392
10393                 if (!commit)
10394                         continue;
10395
10396                 /*
10397                  * Make sure all pending HW programming completed and
10398                  * page flips done
10399                  */
10400                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10401
10402                 if (ret > 0)
10403                         ret = wait_for_completion_interruptible_timeout(
10404                                         &commit->flip_done, 10*HZ);
10405
10406                 if (ret == 0)
10407                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10408                                   crtc->base.id, crtc->name);
10409
10410                 drm_crtc_commit_put(commit);
10411         }
10412
10413         return ret < 0 ? ret : 0;
10414 }
10415
10416 static void get_freesync_config_for_crtc(
10417         struct dm_crtc_state *new_crtc_state,
10418         struct dm_connector_state *new_con_state)
10419 {
10420         struct mod_freesync_config config = {0};
10421         struct amdgpu_dm_connector *aconnector =
10422                         to_amdgpu_dm_connector(new_con_state->base.connector);
10423         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10424         int vrefresh = drm_mode_vrefresh(mode);
10425         bool fs_vid_mode = false;
10426
10427         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10428                                         vrefresh >= aconnector->min_vfreq &&
10429                                         vrefresh <= aconnector->max_vfreq;
10430
10431         if (new_crtc_state->vrr_supported) {
10432                 new_crtc_state->stream->ignore_msa_timing_param = true;
10433                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10434
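                /* mod_freesync expects refresh rates in uHz (Hz * 1,000,000) */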
10435                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10436                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10437                 config.vsif_supported = true;
10438                 config.btr = true;
10439
10440                 if (fs_vid_mode) {
10441                         config.state = VRR_STATE_ACTIVE_FIXED;
10442                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10443                         goto out;
10444                 } else if (new_crtc_state->base.vrr_enabled) {
10445                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10446                 } else {
10447                         config.state = VRR_STATE_INACTIVE;
10448                 }
10449         }
10450 out:
10451         new_crtc_state->freesync_config = config;
10452 }
10453
10454 static void reset_freesync_config_for_crtc(
10455         struct dm_crtc_state *new_crtc_state)
10456 {
10457         new_crtc_state->vrr_supported = false;
10458
10459         memset(&new_crtc_state->vrr_infopacket, 0,
10460                sizeof(new_crtc_state->vrr_infopacket));
10461 }
10462
10463 static bool
10464 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10465                                  struct drm_crtc_state *new_crtc_state)
10466 {
10467         const struct drm_display_mode *old_mode, *new_mode;
10468
10469         if (!old_crtc_state || !new_crtc_state)
10470                 return false;
10471
10472         old_mode = &old_crtc_state->mode;
10473         new_mode = &new_crtc_state->mode;
10474
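        /*
         * Only the vertical timing may differ: a FreeSync video mode switch
         * changes just the vertical front porch, so vtotal, vsync_start and
         * vsync_end are allowed to move as long as the vsync pulse width
         * (vsync_end - vsync_start) stays the same.
         */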
10475         if (old_mode->clock       == new_mode->clock &&
10476             old_mode->hdisplay    == new_mode->hdisplay &&
10477             old_mode->vdisplay    == new_mode->vdisplay &&
10478             old_mode->htotal      == new_mode->htotal &&
10479             old_mode->vtotal      != new_mode->vtotal &&
10480             old_mode->hsync_start == new_mode->hsync_start &&
10481             old_mode->vsync_start != new_mode->vsync_start &&
10482             old_mode->hsync_end   == new_mode->hsync_end &&
10483             old_mode->vsync_end   != new_mode->vsync_end &&
10484             old_mode->hskew       == new_mode->hskew &&
10485             old_mode->vscan       == new_mode->vscan &&
10486             (old_mode->vsync_end - old_mode->vsync_start) ==
10487             (new_mode->vsync_end - new_mode->vsync_start))
10488                 return true;
10489
10490         return false;
10491 }
10492
10493 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10494         uint64_t num, den, res;
10495         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10496
10497         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10498
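        /*
         * Fixed refresh rate in uHz: pixel clock (in kHz, hence the extra
         * factor of 1000) scaled to uHz and divided by the number of pixels
         * per frame (htotal * vtotal).
         */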
10499         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10500         den = (unsigned long long)new_crtc_state->mode.htotal *
10501               (unsigned long long)new_crtc_state->mode.vtotal;
10502
10503         res = div_u64(num, den);
10504         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10505 }
10506
10507 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10508                          struct drm_atomic_state *state,
10509                          struct drm_crtc *crtc,
10510                          struct drm_crtc_state *old_crtc_state,
10511                          struct drm_crtc_state *new_crtc_state,
10512                          bool enable,
10513                          bool *lock_and_validation_needed)
10514 {
10515         struct dm_atomic_state *dm_state = NULL;
10516         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10517         struct dc_stream_state *new_stream;
10518         int ret = 0;
10519
10520         /*
10521          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10522          * update changed items
10523          */
10524         struct amdgpu_crtc *acrtc = NULL;
10525         struct amdgpu_dm_connector *aconnector = NULL;
10526         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10527         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10528
10529         new_stream = NULL;
10530
10531         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10532         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10533         acrtc = to_amdgpu_crtc(crtc);
10534         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10535
10536         /* TODO This hack should go away */
10537         if (aconnector && enable) {
10538                 /* Make sure fake sink is created in plug-in scenario */
10539                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10540                                                             &aconnector->base);
10541                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10542                                                             &aconnector->base);
10543
10544                 if (IS_ERR(drm_new_conn_state)) {
10545                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10546                         goto fail;
10547                 }
10548
10549                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10550                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10551
10552                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10553                         goto skip_modeset;
10554
10555                 new_stream = create_validate_stream_for_sink(aconnector,
10556                                                              &new_crtc_state->mode,
10557                                                              dm_new_conn_state,
10558                                                              dm_old_crtc_state->stream);
10559
10560                 /*
10561                  * We can have no stream on ACTION_SET if a display
10562                  * was disconnected during S3. In this case it is not an
10563                  * error: the OS will be updated after detection and
10564                  * will do the right thing on the next atomic commit.
10565                  */
10566
10567                 if (!new_stream) {
10568                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10569                                         __func__, acrtc->base.base.id);
10570                         ret = -ENOMEM;
10571                         goto fail;
10572                 }
10573
10574                 /*
10575                  * TODO: Check VSDB bits to decide whether this should
10576                  * be enabled or not.
10577                  */
10578                 new_stream->triggered_crtc_reset.enabled =
10579                         dm->force_timing_sync;
10580
10581                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10582
10583                 ret = fill_hdr_info_packet(drm_new_conn_state,
10584                                            &new_stream->hdr_static_metadata);
10585                 if (ret)
10586                         goto fail;
10587
10588                 /*
10589                  * If we already removed the old stream from the context
10590                  * (and set the new stream to NULL) then we can't reuse
10591                  * the old stream even if the stream and scaling are unchanged.
10592                  * We'll hit the BUG_ON and black screen.
10593                  *
10594                  * TODO: Refactor this function to allow this check to work
10595                  * in all conditions.
10596                  */
10597                 if (dm_new_crtc_state->stream &&
10598                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10599                         goto skip_modeset;
10600
10601                 if (dm_new_crtc_state->stream &&
10602                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10603                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10604                         new_crtc_state->mode_changed = false;
10605                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10606                                          new_crtc_state->mode_changed);
10607                 }
10608         }
10609
10610         /* mode_changed flag may get updated above, need to check again */
10611         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10612                 goto skip_modeset;
10613
10614         drm_dbg_state(state->dev,
10615                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10616                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10617                 "connectors_changed:%d\n",
10618                 acrtc->crtc_id,
10619                 new_crtc_state->enable,
10620                 new_crtc_state->active,
10621                 new_crtc_state->planes_changed,
10622                 new_crtc_state->mode_changed,
10623                 new_crtc_state->active_changed,
10624                 new_crtc_state->connectors_changed);
10625
10626         /* Remove stream for any changed/disabled CRTC */
10627         if (!enable) {
10629                 if (!dm_old_crtc_state->stream)
10630                         goto skip_modeset;
10631
10632                 if (dm_new_crtc_state->stream &&
10633                     is_timing_unchanged_for_freesync(new_crtc_state,
10634                                                      old_crtc_state)) {
10635                         new_crtc_state->mode_changed = false;
10636                         DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d\n",
10637                                          new_crtc_state->mode_changed);
10640
10641                         set_freesync_fixed_config(dm_new_crtc_state);
10642
10643                         goto skip_modeset;
10644                 } else if (aconnector &&
10645                            is_freesync_video_mode(&new_crtc_state->mode,
10646                                                   aconnector)) {
10647                         struct drm_display_mode *high_mode;
10648
10649                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10650                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10651                                 set_freesync_fixed_config(dm_new_crtc_state);
10652                         }
10653                 }
10654
10655                 ret = dm_atomic_get_state(state, &dm_state);
10656                 if (ret)
10657                         goto fail;
10658
10659                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10660                                 crtc->base.id);
10661
10662                 /* i.e. reset mode */
10663                 if (dc_remove_stream_from_ctx(
10664                                 dm->dc,
10665                                 dm_state->context,
10666                                 dm_old_crtc_state->stream) != DC_OK) {
10667                         ret = -EINVAL;
10668                         goto fail;
10669                 }
10670
10671                 dc_stream_release(dm_old_crtc_state->stream);
10672                 dm_new_crtc_state->stream = NULL;
10673
10674                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10675
10676                 *lock_and_validation_needed = true;
10677
10678         } else { /* Add stream for any updated/enabled CRTC */
10679                 /*
10680                  * Quick fix to prevent a NULL pointer dereference on
10681                  * new_stream when MST connectors added in chained mode are
10682                  * not found in the existing crtc_state.
10683                  * TODO: need to dig out the root cause of that.
10684                  */
10684                 if (!aconnector)
10685                         goto skip_modeset;
10686
10687                 if (modereset_required(new_crtc_state))
10688                         goto skip_modeset;
10689
10690                 if (modeset_required(new_crtc_state, new_stream,
10691                                      dm_old_crtc_state->stream)) {
10693                         WARN_ON(dm_new_crtc_state->stream);
10694
10695                         ret = dm_atomic_get_state(state, &dm_state);
10696                         if (ret)
10697                                 goto fail;
10698
10699                         dm_new_crtc_state->stream = new_stream;
10700
10701                         dc_stream_retain(new_stream);
10702
10703                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10704                                          crtc->base.id);
10705
10706                         if (dc_add_stream_to_ctx(
10707                                         dm->dc,
10708                                         dm_state->context,
10709                                         dm_new_crtc_state->stream) != DC_OK) {
10710                                 ret = -EINVAL;
10711                                 goto fail;
10712                         }
10713
10714                         *lock_and_validation_needed = true;
10715                 }
10716         }
10717
10718 skip_modeset:
10719         /* Release extra reference */
10720         if (new_stream)
10721                 dc_stream_release(new_stream);
10722
10723         /*
10724          * We want to do dc stream updates that do not require a
10725          * full modeset below.
10726          */
10727         if (!(enable && aconnector && new_crtc_state->active))
10728                 return 0;
10729         /*
10730          * Given the above conditions, the dc state cannot be NULL because:
10731          * 1. We're in the process of enabling CRTCs (the stream has just
10732          *    been added to the dc context, or is already on the context),
10733          * 2. the CRTC has a valid connector attached, and
10734          * 3. the CRTC is currently active and enabled.
10735          * => The dc stream state currently exists.
10736          */
10737         BUG_ON(dm_new_crtc_state->stream == NULL);
10738
10739         /* Scaling or underscan settings */
10740         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10741                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10742                 update_stream_scaling_settings(
10743                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10744
10745         /* ABM settings */
10746         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10747
10748         /*
10749          * Color management settings. We also update color properties
10750          * when a modeset is needed, to ensure it gets reprogrammed.
10751          */
10752         if (dm_new_crtc_state->base.color_mgmt_changed ||
10753             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10754                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10755                 if (ret)
10756                         goto fail;
10757         }
10758
10759         /* Update Freesync settings. */
10760         get_freesync_config_for_crtc(dm_new_crtc_state,
10761                                      dm_new_conn_state);
10762
10763         return ret;
10764
10765 fail:
10766         if (new_stream)
10767                 dc_stream_release(new_stream);
10768         return ret;
10769 }
10770
10771 static bool should_reset_plane(struct drm_atomic_state *state,
10772                                struct drm_plane *plane,
10773                                struct drm_plane_state *old_plane_state,
10774                                struct drm_plane_state *new_plane_state)
10775 {
10776         struct drm_plane *other;
10777         struct drm_plane_state *old_other_state, *new_other_state;
10778         struct drm_crtc_state *new_crtc_state;
10779         int i;
10780
10781         /*
10782          * TODO: Remove this hack once the checks below are sufficient
10783          * to determine when we need to reset all the planes on
10784          * the stream.
10785          */
10786         if (state->allow_modeset)
10787                 return true;
10788
10789         /* Exit early if we know that we're adding or removing the plane. */
10790         if (old_plane_state->crtc != new_plane_state->crtc)
10791                 return true;
10792
10793         /* old crtc == new_crtc == NULL, plane not in context. */
10794         if (!new_plane_state->crtc)
10795                 return false;
10796
10797         new_crtc_state =
10798                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10799
10800         if (!new_crtc_state)
10801                 return true;
10802
10803         /* CRTC Degamma changes currently require us to recreate planes. */
10804         if (new_crtc_state->color_mgmt_changed)
10805                 return true;
10806
10807         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10808                 return true;
10809
10810         /*
10811          * If there are any new primary or overlay planes being added or
10812          * removed then the z-order can potentially change. To ensure
10813          * correct z-order and pipe acquisition the current DC architecture
10814          * requires us to remove and recreate all existing planes.
10815          *
10816          * TODO: Come up with a more elegant solution for this.
10817          */
10818         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10819                 struct amdgpu_framebuffer *old_afb, *new_afb;

10820                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10821                         continue;
10822
10823                 if (old_other_state->crtc != new_plane_state->crtc &&
10824                     new_other_state->crtc != new_plane_state->crtc)
10825                         continue;
10826
10827                 if (old_other_state->crtc != new_other_state->crtc)
10828                         return true;
10829
10830                 /* Src/dst size and scaling updates. */
10831                 if (old_other_state->src_w != new_other_state->src_w ||
10832                     old_other_state->src_h != new_other_state->src_h ||
10833                     old_other_state->crtc_w != new_other_state->crtc_w ||
10834                     old_other_state->crtc_h != new_other_state->crtc_h)
10835                         return true;
10836
10837                 /* Rotation / mirroring updates. */
10838                 if (old_other_state->rotation != new_other_state->rotation)
10839                         return true;
10840
10841                 /* Blending updates. */
10842                 if (old_other_state->pixel_blend_mode !=
10843                     new_other_state->pixel_blend_mode)
10844                         return true;
10845
10846                 /* Alpha updates. */
10847                 if (old_other_state->alpha != new_other_state->alpha)
10848                         return true;
10849
10850                 /* Colorspace changes. */
10851                 if (old_other_state->color_range != new_other_state->color_range ||
10852                     old_other_state->color_encoding != new_other_state->color_encoding)
10853                         return true;
10854
10855                 /* Framebuffer checks fall at the end. */
10856                 if (!old_other_state->fb || !new_other_state->fb)
10857                         continue;
10858
10859                 /* Pixel format changes can require bandwidth updates. */
10860                 if (old_other_state->fb->format != new_other_state->fb->format)
10861                         return true;
10862
10863                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10864                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10865
10866                 /* Tiling and DCC changes also require bandwidth updates. */
10867                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10868                     old_afb->base.modifier != new_afb->base.modifier)
10869                         return true;
10870         }
10871
10872         return false;
10873 }
10874
10875 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10876                               struct drm_plane_state *new_plane_state,
10877                               struct drm_framebuffer *fb)
10878 {
10879         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10880         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10881         unsigned int pitch;
10882         bool linear;
10883
10884         if (fb->width > new_acrtc->max_cursor_width ||
10885             fb->height > new_acrtc->max_cursor_height) {
10886                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10887                                  fb->width, fb->height);
10889                 return -EINVAL;
10890         }
10891         if (new_plane_state->src_w != fb->width << 16 ||
10892             new_plane_state->src_h != fb->height << 16) {
10893                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10894                 return -EINVAL;
10895         }
10896
10897         /* Pitch in pixels */
10898         pitch = fb->pitches[0] / fb->format->cpp[0];
10899
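        /*
         * The cursor surface is presumably expected to be tightly packed,
         * so reject FBs whose width does not match the pitch.
         */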
10900         if (fb->width != pitch) {
10901                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10902                                  fb->width, pitch);
10903                 return -EINVAL;
10904         }
10905
10906         switch (pitch) {
10907         case 64:
10908         case 128:
10909         case 256:
10910                 /* FB pitch is supported by cursor plane */
10911                 break;
10912         default:
10913                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10914                 return -EINVAL;
10915         }
10916
10917         /*
10918          * Core DRM takes care of checking FB modifiers, so we only need to
10919          * check tiling flags when the FB doesn't have a modifier.
10920          */
10919         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10920                 if (adev->family < AMDGPU_FAMILY_AI) {
10921                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10922                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10923                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10924                 } else {
10925                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10926                 }
10927                 if (!linear) {
10928                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10929                         return -EINVAL;
10930                 }
10931         }
10932
10933         return 0;
10934 }
10935
10936 static int dm_update_plane_state(struct dc *dc,
10937                                  struct drm_atomic_state *state,
10938                                  struct drm_plane *plane,
10939                                  struct drm_plane_state *old_plane_state,
10940                                  struct drm_plane_state *new_plane_state,
10941                                  bool enable,
10942                                  bool *lock_and_validation_needed)
10943 {
10945         struct dm_atomic_state *dm_state = NULL;
10946         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10947         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10948         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10949         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10950         struct amdgpu_crtc *new_acrtc;
10951         bool needs_reset;
10952         int ret = 0;
10953
10955         new_plane_crtc = new_plane_state->crtc;
10956         old_plane_crtc = old_plane_state->crtc;
10957         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10958         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10959
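        /*
         * Cursor planes get no dc_plane_state of their own: DC handles the
         * cursor as an attribute of the underlying pipe (see
         * dm_check_crtc_cursor()), so only validate the cursor FB here.
         */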
10960         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10961                 if (!enable || !new_plane_crtc ||
10962                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10963                         return 0;
10964
10965                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10966
10967                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10968                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10969                         return -EINVAL;
10970                 }
10971
10972                 if (new_plane_state->fb) {
10973                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10974                                                  new_plane_state->fb);
10975                         if (ret)
10976                                 return ret;
10977                 }
10978
10979                 return 0;
10980         }
10981
10982         needs_reset = should_reset_plane(state, plane, old_plane_state,
10983                                          new_plane_state);
10984
10985         /* Remove any changed/removed planes */
10986         if (!enable) {
10987                 if (!needs_reset)
10988                         return 0;
10989
10990                 if (!old_plane_crtc)
10991                         return 0;
10992
10993                 old_crtc_state = drm_atomic_get_old_crtc_state(
10994                                 state, old_plane_crtc);
10995                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10996
10997                 if (!dm_old_crtc_state->stream)
10998                         return 0;
10999
11000                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
11001                                 plane->base.id, old_plane_crtc->base.id);
11002
11003                 ret = dm_atomic_get_state(state, &dm_state);
11004                 if (ret)
11005                         return ret;
11006
11007                 if (!dc_remove_plane_from_context(
11008                                 dc,
11009                                 dm_old_crtc_state->stream,
11010                                 dm_old_plane_state->dc_state,
11011                                 dm_state->context)) {
11013                         return -EINVAL;
11014                 }
11015
11017                 dc_plane_state_release(dm_old_plane_state->dc_state);
11018                 dm_new_plane_state->dc_state = NULL;
11019
11020                 *lock_and_validation_needed = true;
11021
11022         } else { /* Add new planes */
11023                 struct dc_plane_state *dc_new_plane_state;
11024
11025                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
11026                         return 0;
11027
11028                 if (!new_plane_crtc)
11029                         return 0;
11030
11031                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
11032                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11033
11034                 if (!dm_new_crtc_state->stream)
11035                         return 0;
11036
11037                 if (!needs_reset)
11038                         return 0;
11039
11040                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
11041                 if (ret)
11042                         return ret;
11043
11044                 WARN_ON(dm_new_plane_state->dc_state);
11045
11046                 dc_new_plane_state = dc_create_plane_state(dc);
11047                 if (!dc_new_plane_state)
11048                         return -ENOMEM;
11049
11050                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
11051                                  plane->base.id, new_plane_crtc->base.id);
11052
11053                 ret = fill_dc_plane_attributes(
11054                         drm_to_adev(new_plane_crtc->dev),
11055                         dc_new_plane_state,
11056                         new_plane_state,
11057                         new_crtc_state);
11058                 if (ret) {
11059                         dc_plane_state_release(dc_new_plane_state);
11060                         return ret;
11061                 }
11062
11063                 ret = dm_atomic_get_state(state, &dm_state);
11064                 if (ret) {
11065                         dc_plane_state_release(dc_new_plane_state);
11066                         return ret;
11067                 }
11068
11069                 /*
11070                  * Any atomic check errors that occur after this will
11071                  * not need a release. The plane state will be attached
11072                  * to the stream, and therefore part of the atomic
11073                  * state. It'll be released when the atomic state is
11074                  * cleaned.
11075                  */
11076                 if (!dc_add_plane_to_context(
11077                                 dc,
11078                                 dm_new_crtc_state->stream,
11079                                 dc_new_plane_state,
11080                                 dm_state->context)) {
11082                         dc_plane_state_release(dc_new_plane_state);
11083                         return -EINVAL;
11084                 }
11085
11086                 dm_new_plane_state->dc_state = dc_new_plane_state;
11087
11088                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
11089
11090                 /*
11091                  * Tell DC to do a full surface update every time there
11092                  * is a plane change. Inefficient, but works for now.
11093                  */
11093                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
11094
11095                 *lock_and_validation_needed = true;
11096         }
11097
11099         return ret;
11100 }
11101
11102 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
11103                                        int *src_w, int *src_h)
11104 {
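        /* src_w/src_h are 16.16 fixed point; shift down to integer pixels */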
11105         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
11106         case DRM_MODE_ROTATE_90:
11107         case DRM_MODE_ROTATE_270:
11108                 *src_w = plane_state->src_h >> 16;
11109                 *src_h = plane_state->src_w >> 16;
11110                 break;
11111         case DRM_MODE_ROTATE_0:
11112         case DRM_MODE_ROTATE_180:
11113         default:
11114                 *src_w = plane_state->src_w >> 16;
11115                 *src_h = plane_state->src_h >> 16;
11116                 break;
11117         }
11118 }
11119
11120 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
11121                                 struct drm_crtc *crtc,
11122                                 struct drm_crtc_state *new_crtc_state)
11123 {
11124         struct drm_plane *cursor = crtc->cursor, *underlying;
11125         struct drm_plane_state *new_cursor_state, *new_underlying_state;
11126         int i;
11127         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
11128         int cursor_src_w, cursor_src_h;
11129         int underlying_src_w, underlying_src_h;
11130
11131         /*
11132          * On DCE and DCN there is no dedicated hardware cursor plane. We get
11133          * a cursor per pipe, but it's going to inherit the scaling and
11134          * positioning from the underlying pipe. Check that the cursor plane's
11135          * blending properties match the underlying planes'.
11136          */
11135
11136         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11137         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11138         if (!new_cursor_state || !new_cursor_state->fb)
11139                 return 0;
11140
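        /* Compare scale factors in 1/1000 units to stay in integer math */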
11141         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11142         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11143         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
11144
11145         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11146                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
11147                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11148                         continue;
11149
11150                 /* Ignore disabled planes */
11151                 if (!new_underlying_state->fb)
11152                         continue;
11153
11154                 dm_get_oriented_plane_size(new_underlying_state,
11155                                            &underlying_src_w, &underlying_src_h);
11156                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11157                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
11158
11159                 if (cursor_scale_w != underlying_scale_w ||
11160                     cursor_scale_h != underlying_scale_h) {
11161                         drm_dbg_atomic(crtc->dev,
11162                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11163                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11164                         return -EINVAL;
11165                 }
11166
11167                 /* If this plane covers the whole CRTC, no need to check planes underneath */
11168                 if (new_underlying_state->crtc_x <= 0 &&
11169                     new_underlying_state->crtc_y <= 0 &&
11170                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11171                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11172                         break;
11173         }
11174
11175         return 0;
11176 }
11177
11178 #if defined(CONFIG_DRM_AMD_DC_DCN)
11179 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11180 {
11181         struct drm_connector *connector;
11182         struct drm_connector_state *conn_state, *old_conn_state;
11183         struct amdgpu_dm_connector *aconnector = NULL;
11184         int i;
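
        /*
         * Find an MST connector on this CRTC. DSC bandwidth is shared across
         * the whole MST topology, so every CRTC on the same topology manager
         * needs to be added to the state for re-validation.
         */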
11185         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11186                 if (!conn_state->crtc)
11187                         conn_state = old_conn_state;
11188
11189                 if (conn_state->crtc != crtc)
11190                         continue;
11191
11192                 aconnector = to_amdgpu_dm_connector(connector);
11193                 if (!aconnector->port || !aconnector->mst_port)
11194                         aconnector = NULL;
11195                 else
11196                         break;
11197         }
11198
11199         if (!aconnector)
11200                 return 0;
11201
11202         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11203 }
11204 #endif
11205
11206 /**
11207  * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
11208  * @dev: The DRM device
11209  * @state: The atomic state to commit
11210  *
11211  * Validate that the given atomic state is programmable by DC into hardware.
11212  * This involves constructing a &struct dc_state reflecting the new hardware
11213  * state we wish to commit, then querying DC to see if it is programmable. It's
11214  * important not to modify the existing DC state. Otherwise, atomic_check
11215  * may unexpectedly commit hardware changes.
11216  *
11217  * When validating the DC state, it's important that the right locks are
11218  * acquired. For the full-update case, which removes/adds/updates streams on
11219  * one CRTC while flipping on another, acquiring the global lock guarantees
11220  * that any such full-update commit will wait for completion of any
11221  * outstanding flip using DRM's synchronization events.
11222  *
11223  * Note that DM adds the affected connectors for all CRTCs in state, even when
11224  * might not seem necessary. This is because DC stream creation requires the
11225  * DC sink, which is tied to the DRM connector state. Cleaning this up should
11226  * be possible but non-trivial - a possible TODO item.
11227  *
11228  * Return: 0 on success, negative error code on validation failure.
11229  */
11230 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11231                                   struct drm_atomic_state *state)
11232 {
11233         struct amdgpu_device *adev = drm_to_adev(dev);
11234         struct dm_atomic_state *dm_state = NULL;
11235         struct dc *dc = adev->dm.dc;
11236         struct drm_connector *connector;
11237         struct drm_connector_state *old_con_state, *new_con_state;
11238         struct drm_crtc *crtc;
11239         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11240         struct drm_plane *plane;
11241         struct drm_plane_state *old_plane_state, *new_plane_state;
11242         enum dc_status status;
11243         int ret, i;
11244         bool lock_and_validation_needed = false;
11245         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11246 #if defined(CONFIG_DRM_AMD_DC_DCN)
11247         struct dsc_mst_fairness_vars vars[MAX_PIPES];
11248         struct drm_dp_mst_topology_state *mst_state;
11249         struct drm_dp_mst_topology_mgr *mgr;
11250 #endif
11251
11252         trace_amdgpu_dm_atomic_check_begin(state);
11253
11254         ret = drm_atomic_helper_check_modeset(dev, state);
11255         if (ret) {
11256                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11257                 goto fail;
11258         }
11259
11260         /* Check connector changes */
11261         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11262                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11263                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11264
11265                 /* Skip connectors that are disabled or already part of a modeset. */
11266                 if (!old_con_state->crtc && !new_con_state->crtc)
11267                         continue;
11268
11269                 if (!new_con_state->crtc)
11270                         continue;
11271
11272                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11273                 if (IS_ERR(new_crtc_state)) {
11274                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11275                         ret = PTR_ERR(new_crtc_state);
11276                         goto fail;
11277                 }
11278
11279                 if (dm_old_con_state->abm_level !=
11280                     dm_new_con_state->abm_level)
11281                         new_crtc_state->connectors_changed = true;
11282         }
11283
11284 #if defined(CONFIG_DRM_AMD_DC_DCN)
11285         if (dc_resource_is_dsc_encoding_supported(dc)) {
11286                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11287                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11288                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
11289                                 if (ret) {
11290                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11291                                         goto fail;
11292                                 }
11293                         }
11294                 }
11295                 if (!pre_validate_dsc(state, &dm_state, vars)) {
11296                         ret = -EINVAL;
11297                         goto fail;
11298                 }
11299         }
11300 #endif
11301         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11302                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11303
11304                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11305                     !new_crtc_state->color_mgmt_changed &&
11306                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11307                     !dm_old_crtc_state->dsc_force_changed)
11308                         continue;
11309
11310                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11311                 if (ret) {
11312                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11313                         goto fail;
11314                 }
11315
11316                 if (!new_crtc_state->enable)
11317                         continue;
11318
11319                 ret = drm_atomic_add_affected_connectors(state, crtc);
11320                 if (ret) {
11321                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11322                         goto fail;
11323                 }
11324
11325                 ret = drm_atomic_add_affected_planes(state, crtc);
11326                 if (ret) {
11327                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11328                         goto fail;
11329                 }
11330
11331                 if (dm_old_crtc_state->dsc_force_changed)
11332                         new_crtc_state->mode_changed = true;
11333         }
11334
11335         /*
11336          * Add all primary and overlay planes on the CRTC to the state
11337          * whenever a plane is enabled to maintain correct z-ordering
11338          * and to enable fast surface updates.
11339          */
11340         drm_for_each_crtc(crtc, dev) {
11341                 bool modified = false;
11342
11343                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11344                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11345                                 continue;
11346
11347                         if (new_plane_state->crtc == crtc ||
11348                             old_plane_state->crtc == crtc) {
11349                                 modified = true;
11350                                 break;
11351                         }
11352                 }
11353
11354                 if (!modified)
11355                         continue;
11356
11357                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11358                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11359                                 continue;
11360
11361                         new_plane_state =
11362                                 drm_atomic_get_plane_state(state, plane);
11363
11364                         if (IS_ERR(new_plane_state)) {
11365                                 ret = PTR_ERR(new_plane_state);
11366                                 DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
11367                                 goto fail;
11368                         }
11369                 }
11370         }
11371
11372         /* Remove existing planes if they are modified */
11373         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11374                 ret = dm_update_plane_state(dc, state, plane,
11375                                             old_plane_state,
11376                                             new_plane_state,
11377                                             false,
11378                                             &lock_and_validation_needed);
11379                 if (ret) {
11380                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11381                         goto fail;
11382                 }
11383         }
11384
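        /*
         * Disables are processed before enables so that DC resources freed
         * by the removed streams and planes can be acquired by the ones
         * being added below.
         */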
11385         /* Disable all crtcs which require disable */
11386         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11387                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11388                                            old_crtc_state,
11389                                            new_crtc_state,
11390                                            false,
11391                                            &lock_and_validation_needed);
11392                 if (ret) {
11393                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11394                         goto fail;
11395                 }
11396         }
11397
11398         /* Enable all crtcs which require enable */
11399         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11400                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11401                                            old_crtc_state,
11402                                            new_crtc_state,
11403                                            true,
11404                                            &lock_and_validation_needed);
11405                 if (ret) {
11406                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11407                         goto fail;
11408                 }
11409         }
11410
11411         /* Add new/modified planes */
11412         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11413                 ret = dm_update_plane_state(dc, state, plane,
11414                                             old_plane_state,
11415                                             new_plane_state,
11416                                             true,
11417                                             &lock_and_validation_needed);
11418                 if (ret) {
11419                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11420                         goto fail;
11421                 }
11422         }
11423
11424         /* Run this here since we want to validate the streams we created */
11425         ret = drm_atomic_helper_check_planes(dev, state);
11426         if (ret) {
11427                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11428                 goto fail;
11429         }
11430
11431         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11432                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11433                 if (dm_new_crtc_state->mpo_requested)
11434                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11435         }
11436
11437         /* Check cursor planes scaling */
11438         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11439                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11440                 if (ret) {
11441                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11442                         goto fail;
11443                 }
11444         }
11445
11446         if (state->legacy_cursor_update) {
11447                 /*
11448                  * This is a fast cursor update coming from the plane update
11449                  * helper, check if it can be done asynchronously for better
11450                  * performance.
11451                  */
11452                 state->async_update =
11453                         !drm_atomic_helper_async_check(dev, state);
11454
11455                 /*
11456                  * Skip the remaining global validation if this is an async
11457                  * update. Cursor updates can be done without affecting
11458                  * state or bandwidth calcs and this avoids the performance
11459                  * penalty of locking the private state object and
11460                  * allocating a new dc_state.
11461                  */
11462                 if (state->async_update)
11463                         return 0;
11464         }
11465
11466         /* Check scaling and underscan changes */
11467         /*
11468          * TODO: Removed scaling changes validation due to inability to commit
11469          * new stream into context without causing full reset. Need to
11470          * decide how to handle.
11471          */
11471         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11472                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11473                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11474                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11475
11476                 /* Skip any modesets/resets */
11477                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11478                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11479                         continue;
11480
11481                 /* Skip anything that is not a scaling or underscan change */
11482                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11483                         continue;
11484
11485                 lock_and_validation_needed = true;
11486         }
11487
11488 #if defined(CONFIG_DRM_AMD_DC_DCN)
11489         /* set the slot info for each mst_state based on the link encoding format */
11490         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11491                 struct amdgpu_dm_connector *aconnector;
11492                 struct drm_connector *connector;
11493                 struct drm_connector_list_iter iter;
11494                 u8 link_coding_cap;
11495
11496                 if (!mgr->mst_state)
11497                         continue;
11498
11499                 drm_connector_list_iter_begin(dev, &iter);
11500                 drm_for_each_connector_iter(connector, &iter) {
11501                         int id = connector->index;
11502
11503                         if (id == mst_state->mgr->conn_base_id) {
11504                                 aconnector = to_amdgpu_dm_connector(connector);
11505                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11506                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11507
11508                                 break;
11509                         }
11510                 }
11511                 drm_connector_list_iter_end(&iter);
11513         }
11514 #endif
11515         /*
11516          * Streams and planes are reset when there are changes that affect
11517          * bandwidth. Anything that affects bandwidth needs to go through
11518          * DC global validation to ensure that the configuration can be applied
11519          * to hardware.
11520          *
11521          * We have to currently stall out here in atomic_check for outstanding
11522          * commits to finish in this case because our IRQ handlers reference
11523          * DRM state directly - we can end up disabling interrupts too early
11524          * if we don't.
11525          *
11526          * TODO: Remove this stall and drop DM state private objects.
11527          */
11528         if (lock_and_validation_needed) {
11529                 ret = dm_atomic_get_state(state, &dm_state);
11530                 if (ret) {
11531                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11532                         goto fail;
11533                 }
11534
11535                 ret = do_aquire_global_lock(dev, state);
11536                 if (ret) {
11537                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11538                         goto fail;
11539                 }
11540
11541 #if defined(CONFIG_DRM_AMD_DC_DCN)
11542                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11543                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11544                         ret = -EINVAL;
11545                         goto fail;
11546                 }
11547
11548                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11549                 if (ret) {
11550                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11551                         goto fail;
11552                 }
11553 #endif
11554
11555                 /*
11556                  * Perform validation of MST topology in the state:
11557                  * We need to perform MST atomic check before calling
11558                  * dc_validate_global_state(), or we risk getting stuck in
11559                  * an infinite loop and eventually hanging.
11560                  */
11561                 ret = drm_dp_mst_atomic_check(state);
11562                 if (ret) {
11563                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11564                         goto fail;
11565                 }
11566                 status = dc_validate_global_state(dc, dm_state->context, true);
11567                 if (status != DC_OK) {
11568                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11569                                        dc_status_to_str(status), status);
11570                         ret = -EINVAL;
11571                         goto fail;
11572                 }
11573         } else {
11574                 /*
11575                  * The commit is a fast update. Fast updates shouldn't change
11576                  * the DC context or affect global validation, and they can
11577                  * have their commit work done in parallel with other commits
11578                  * not touching the same resource. If we have a new DC context as part of
11579                  * the DM atomic state from validation we need to free it and
11580                  * retain the existing one instead.
11581                  *
11582                  * Furthermore, since the DM atomic state only contains the DC
11583                  * context and can safely be annulled, we can free the state
11584                  * and clear the associated private object now to free
11585                  * some memory and avoid a possible use-after-free later.
11586                  */
11587
11588                 for (i = 0; i < state->num_private_objs; i++) {
11589                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11590
11591                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11592                                 int j = state->num_private_objs-1;
11593
11594                                 dm_atomic_destroy_state(obj,
11595                                                 state->private_objs[i].state);
11596
11597                                 /* If i is not at the end of the array then the
11598                                  * last element needs to be moved to where i was
11599                                  * before the array can safely be truncated.
11600                                  */
11601                                 if (i != j)
11602                                         state->private_objs[i] =
11603                                                 state->private_objs[j];
11604
11605                                 state->private_objs[j].ptr = NULL;
11606                                 state->private_objs[j].state = NULL;
11607                                 state->private_objs[j].old_state = NULL;
11608                                 state->private_objs[j].new_state = NULL;
11609
11610                                 state->num_private_objs = j;
11611                                 break;
11612                         }
11613                 }
11614         }
11615
11616         /* Store the overall update type for use later in atomic check. */
11617         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11618                 struct dm_crtc_state *dm_new_crtc_state =
11619                         to_dm_crtc_state(new_crtc_state);
11620
11621                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11622                                                          UPDATE_TYPE_FULL :
11623                                                          UPDATE_TYPE_FAST;
11624         }
11625
11626         /* Must be success */
11627         WARN_ON(ret);
11628
11629         trace_amdgpu_dm_atomic_check_finish(state, ret);
11630
11631         return ret;
11632
11633 fail:
11634         if (ret == -EDEADLK)
11635                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11636         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11637                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11638         else
11639                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11640
11641         trace_amdgpu_dm_atomic_check_finish(state, ret);
11642
11643         return ret;
11644 }
11645
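/*
 * Check whether the sink can render a video stream without the MSA
 * timing parameters. DP_DOWN_STREAM_PORT_COUNT (DPCD 0x007) carries the
 * DP_MSA_TIMING_PAR_IGNORED capability bit; a sink that sets it may be
 * driven with timings that deviate from the MSA, which is a prerequisite
 * for variable refresh on DP/eDP.
 */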
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(NULL, amdgpu_dm_connector->dc_link,
				    DP_DOWN_STREAM_PORT_COUNT,
				    &dpcd_data, sizeof(dpcd_data)))
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);

	return capable;
}

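/*
 * Send one chunk of a CEA extension block to the DMUB firmware for
 * parsing. The reply is either a per-chunk ACK or, for the final chunk,
 * the parsed AMD VSDB fields (FreeSync support plus the min/max frame
 * rates), which are copied into @vsdb.
 */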
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
		unsigned int offset,
		unsigned int total_length,
		uint8_t *data,
		unsigned int length,
		struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->cea_total_length = total_length;
	memcpy(input->payload, data, length);

	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
					output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}

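/*
 * DMCU-based CEA parse: stream the extension block to the firmware in
 * 8-byte chunks, checking the ACK after every intermediate chunk. Once
 * the final chunk has been sent, ask the firmware whether it found an
 * AMD VSDB and, if so, fetch the advertised refresh-rate range.
 */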
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* entire EDID block sent, expect the parse result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* AMD VSDB found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* no AMD VSDB */
			return false;
		}

		/* check for per-chunk ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

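/*
 * DMUB-based CEA parse: the same 8-byte chunking as the DMCU path, but
 * ACK handling and VSDB extraction are folded into
 * dm_edid_parser_send_cea() above.
 */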
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

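/* Dispatch to the DMUB- or DMCU-based parser, whichever firmware is in use. */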
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);

	if (adev->dm.dmub_srv)
		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
}

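/*
 * Find the CEA extension block in @edid and scan it for an AMD VSDB.
 * Returns the index of the extension block on success, or -ENODEV when
 * there is no EDID, no CEA extension or no valid VSDB.
 */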
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

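/**
 * amdgpu_dm_update_freesync_caps - Update the connector's FreeSync state
 * @connector: connector to update
 * @edid: EDID read from the sink, or NULL to clear the cached range
 *
 * DP/eDP sinks advertise their refresh-rate range through a detailed
 * monitor range descriptor, honoured only when the sink can ignore MSA
 * timing parameters; HDMI sinks advertise it through the AMD VSDB in the
 * CEA extension block. A span of more than 10 Hz between the minimum and
 * maximum refresh rate marks the connector as FreeSync/VRR capable.
 */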
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing	= &edid->detailed_timings[i];
				data	= &timing->data.other_data;
				range	= &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

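/*
 * Propagate the force_timing_sync setting to every stream in the current
 * DC state and retrigger CRTC synchronization, all under the DC lock.
 */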
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

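/*
 * Register accessors handed to DC. Both trace every access; the read
 * path also refuses to read back registers while a DMUB register-offload
 * gather is in progress, as a readback would race the queued writes.
 */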
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

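/*
 * Translate the outcome of an async DMUB request into the synchronous
 * return convention used by the caller below: for AUX commands the reply
 * length (or -1 on failure) plus an AUX_RET_* code in @operation_result,
 * for SET_CONFIG 0 (or -1) plus the firmware's sc_status.
 */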
static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
						struct dc_context *ctx,
						uint8_t status_type,
						uint32_t *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int return_status = -1;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;

	if (is_cmd_aux) {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = p_notify->aux_reply.length;
			*operation_result = p_notify->result;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
			*operation_result = AUX_RET_ERROR_TIMEOUT;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		} else {
			*operation_result = AUX_RET_ERROR_UNKNOWN;
		}
	} else {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = 0;
			*operation_result = p_notify->sc_status;
		} else {
			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
		}
	}

	return return_status;
}

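/*
 * Issue an AUX or SET_CONFIG request through DMUB and block (for up to
 * 10 seconds) on dmub_aux_transfer_done until the notification handler
 * signals completion, giving callers a synchronous view of the async
 * firmware interface. For a successful AUX read, the reply payload is
 * copied back into the caller's aux_payload.
 */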
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
	unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
					(struct set_config_cmd_payload *)cmd_payload,
					adev->dm.dmub_notify)) {
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timed out!\n");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on CHIP_VANGOGH.
 * If everything goes well, we may consider expanding
 * seamless boot to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VANGOGH:
		if (!adev->mman.keep_stolen_vga_memory)
			return true;
		break;
	default:
		break;
	}

	return false;
}