1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64
65 #include "ivsrcid/ivsrcid_vislands30.h"
66
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/dp/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93
94 #include "soc15_common.h"
95 #endif
96
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117
118 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
119 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120
121 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123
124 /* Number of bytes in PSP header for firmware. */
125 #define PSP_HEADER_BYTES 0x100
126
127 /* Number of bytes in PSP footer for firmware. */
128 #define PSP_FOOTER_BYTES 0x100
129
130 /**
131  * DOC: overview
132  *
133  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135  * requests into DC requests, and DC responses into DRM responses.
136  *
137  * The root control structure is &struct amdgpu_display_manager.
138  */
139
140 /* basic init/fini API */
141 static int amdgpu_dm_init(struct amdgpu_device *adev);
142 static void amdgpu_dm_fini(struct amdgpu_device *adev);
143 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
144
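/*
 * Map the dongle type from the link's DPCD caps onto the DRM subconnector
 * enum reported to userspace; both DVI variants collapse to DVI-D, both
 * HDMI variants to HDMI-A, and anything unrecognized (including a
 * mismatched HDMI dongle) reports Unknown.
 */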
145 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146 {
147         switch (link->dpcd_caps.dongle_type) {
148         case DISPLAY_DONGLE_NONE:
149                 return DRM_MODE_SUBCONNECTOR_Native;
150         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
151                 return DRM_MODE_SUBCONNECTOR_VGA;
152         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
153         case DISPLAY_DONGLE_DP_DVI_DONGLE:
154                 return DRM_MODE_SUBCONNECTOR_DVID;
155         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
156         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
157                 return DRM_MODE_SUBCONNECTOR_HDMIA;
158         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159         default:
160                 return DRM_MODE_SUBCONNECTOR_Unknown;
161         }
162 }
163
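/*
 * Refresh the DP subconnector property for a connector. Only DisplayPort
 * connectors carry the property; it reads Unknown while no sink is
 * attached.
 */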
164 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165 {
166         struct dc_link *link = aconnector->dc_link;
167         struct drm_connector *connector = &aconnector->base;
168         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169
170         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
171                 return;
172
173         if (aconnector->dc_sink)
174                 subconnector = get_subconnector_type(link);
175
176         drm_object_property_set_value(&connector->base,
177                         connector->dev->mode_config.dp_subconnector_property,
178                         subconnector);
179 }
180
181 /*
182  * initializes drm_device display related structures, based on the information
183  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
184  * drm_encoder, drm_mode_config
185  *
186  * Returns 0 on success
187  */
188 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
189 /* removes and deallocates the drm structures, created by the above function */
190 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191
192 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193                                 struct drm_plane *plane,
194                                 unsigned long possible_crtcs,
195                                 const struct dc_plane_cap *plane_cap);
196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197                                struct drm_plane *plane,
198                                uint32_t link_index);
199 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
201                                     uint32_t link_index,
202                                     struct amdgpu_encoder *amdgpu_encoder);
203 static int amdgpu_dm_encoder_init(struct drm_device *dev,
204                                   struct amdgpu_encoder *aencoder,
205                                   uint32_t link_index);
206
207 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208
209 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210
211 static int amdgpu_dm_atomic_check(struct drm_device *dev,
212                                   struct drm_atomic_state *state);
213
214 static void handle_cursor_update(struct drm_plane *plane,
215                                  struct drm_plane_state *old_plane_state);
216
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219
220 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
221 static void handle_hpd_rx_irq(void *param);
222
223 static bool
224 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
225                                  struct drm_crtc_state *new_crtc_state);
226 /*
227  * dm_vblank_get_counter
228  *
229  * @brief
230  * Get counter for number of vertical blanks
231  *
232  * @param
233  * struct amdgpu_device *adev - [in] desired amdgpu device
234  * int crtc - [in] which CRTC to get the counter from
235  *
236  * @return
237  * Counter for vertical blanks
238  */
239 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
240 {
241         if (crtc >= adev->mode_info.num_crtc)
242                 return 0;
243         else {
244                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
245
246                 if (acrtc->dm_irq_params.stream == NULL) {
247                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
248                                   crtc);
249                         return 0;
250                 }
251
252                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
253         }
254 }
255
256 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
257                                   u32 *vbl, u32 *position)
258 {
259         uint32_t v_blank_start, v_blank_end, h_position, v_position;
260
261         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262                 return -EINVAL;
263         else {
264                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
265
266                 if (acrtc->dm_irq_params.stream == NULL) {
267                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
268                                   crtc);
269                         return 0;
270                 }
271
272                 /*
273                  * TODO rework base driver to use values directly.
274                  * for now parse it back into reg-format
275                  */
276                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
277                                          &v_blank_start,
278                                          &v_blank_end,
279                                          &h_position,
280                                          &v_position);
281
282                 *position = v_position | (h_position << 16);
283                 *vbl = v_blank_start | (v_blank_end << 16);
284         }
285
286         return 0;
287 }
288
289 static bool dm_is_idle(void *handle)
290 {
291         /* XXX todo */
292         return true;
293 }
294
295 static int dm_wait_for_idle(void *handle)
296 {
297         /* XXX todo */
298         return 0;
299 }
300
301 static bool dm_check_soft_reset(void *handle)
302 {
303         return false;
304 }
305
306 static int dm_soft_reset(void *handle)
307 {
308         /* XXX todo */
309         return 0;
310 }
311
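/*
 * Look up the amdgpu_crtc whose OTG (output timing generator) instance
 * matches otg_inst by walking the DRM CRTC list; an otg_inst of -1 is
 * unexpected and falls back to the first CRTC with a warning.
 */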
312 static struct amdgpu_crtc *
313 get_crtc_by_otg_inst(struct amdgpu_device *adev,
314                      int otg_inst)
315 {
316         struct drm_device *dev = adev_to_drm(adev);
317         struct drm_crtc *crtc;
318         struct amdgpu_crtc *amdgpu_crtc;
319
320         if (WARN_ON(otg_inst == -1))
321                 return adev->mode_info.crtcs[0];
322
323         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324                 amdgpu_crtc = to_amdgpu_crtc(crtc);
325
326                 if (amdgpu_crtc->otg_inst == otg_inst)
327                         return amdgpu_crtc;
328         }
329
330         return NULL;
331 }
332
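/*
 * VRR state helpers: the _irq variant reads the freesync state cached in
 * the CRTC's interrupt parameters (safe from interrupt context), while
 * amdgpu_dm_vrr_active() reads it from an atomic CRTC state. VRR counts
 * as active in both the variable and the fixed active states.
 */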
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334 {
335         return acrtc->dm_irq_params.freesync_config.state ==
336                        VRR_STATE_ACTIVE_VARIABLE ||
337                acrtc->dm_irq_params.freesync_config.state ==
338                        VRR_STATE_ACTIVE_FIXED;
339 }
340
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342 {
343         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345 }
346
347 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
348                                               struct dm_crtc_state *new_state)
349 {
350         if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
351                 return true;
352         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
353                 return true;
354         else
355                 return false;
356 }
357
358 /**
359  * dm_pflip_high_irq() - Handle pageflip interrupt
360  * @interrupt_params: used for determining the CRTC instance of the completed flip
361  *
362  * Handles the pageflip interrupt by notifying all interested parties
363  * that the pageflip has been completed.
364  */
365 static void dm_pflip_high_irq(void *interrupt_params)
366 {
367         struct amdgpu_crtc *amdgpu_crtc;
368         struct common_irq_params *irq_params = interrupt_params;
369         struct amdgpu_device *adev = irq_params->adev;
370         unsigned long flags;
371         struct drm_pending_vblank_event *e;
372         uint32_t vpos, hpos, v_blank_start, v_blank_end;
373         bool vrr_active;
374
375         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376
377         /* IRQ could occur when in initial stage */
378         /* TODO work and BO cleanup */
379         if (amdgpu_crtc == NULL) {
380                 DC_LOG_PFLIP("CRTC is null, returning.\n");
381                 return;
382         }
383
384         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385
386         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
387                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
388                                                  amdgpu_crtc->pflip_status,
389                                                  AMDGPU_FLIP_SUBMITTED,
390                                                  amdgpu_crtc->crtc_id,
391                                                  amdgpu_crtc);
392                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
393                 return;
394         }
395
396         /* page flip completed. */
397         e = amdgpu_crtc->event;
398         amdgpu_crtc->event = NULL;
399
400         WARN_ON(!e);
401
402         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
403
404         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
405         if (!vrr_active ||
406             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407                                       &v_blank_end, &hpos, &vpos) ||
408             (vpos < v_blank_start)) {
409                 /* Update to correct count and vblank timestamp if racing with
410                  * vblank irq. This also updates to the correct vblank timestamp
411                  * even in VRR mode, as scanout is past the front-porch atm.
412                  */
413                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
414
415                 /* Wake up userspace by sending the pageflip event with proper
416                  * count and timestamp of vblank of flip completion.
417                  */
418                 if (e) {
419                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
420
421                         /* Event sent, so done with vblank for this flip */
422                         drm_crtc_vblank_put(&amdgpu_crtc->base);
423                 }
424         } else if (e) {
425                 /* VRR active and inside front-porch: vblank count and
426                  * timestamp for pageflip event will only be up to date after
427                  * drm_crtc_handle_vblank() has been executed from late vblank
428                  * irq handler after start of back-porch (vline 0). We queue the
429                  * pageflip event for send-out by drm_crtc_handle_vblank() with
430                  * updated timestamp and count, once it runs after us.
431                  *
432                  * We need to open-code this instead of using the helper
433                  * drm_crtc_arm_vblank_event(), as that helper would
434                  * call drm_crtc_accurate_vblank_count(), which we must
435                  * not call in VRR mode while we are in front-porch!
436                  */
437
438                 /* sequence will be replaced by real count during send-out. */
439                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440                 e->pipe = amdgpu_crtc->crtc_id;
441
442                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
443                 e = NULL;
444         }
445
446         /* Keep track of vblank of this flip for flip throttling. We use the
447          * cooked hw counter, as that one incremented at start of this vblank
448          * of pageflip completion, so last_flip_vblank is the forbidden count
449          * for queueing new pageflips if vsync + VRR is enabled.
450          */
451         amdgpu_crtc->dm_irq_params.last_flip_vblank =
452                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
453
454         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
456
457         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458                      amdgpu_crtc->crtc_id, amdgpu_crtc,
459                      vrr_active, (int) !e);
460 }
461
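/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, when VRR is active, performs core
 * vblank handling after end of front-porch, plus below-the-range (BTR)
 * processing on pre-DCE12 ASICs.
 */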
462 static void dm_vupdate_high_irq(void *interrupt_params)
463 {
464         struct common_irq_params *irq_params = interrupt_params;
465         struct amdgpu_device *adev = irq_params->adev;
466         struct amdgpu_crtc *acrtc;
467         struct drm_device *drm_dev;
468         struct drm_vblank_crtc *vblank;
469         ktime_t frame_duration_ns, previous_timestamp;
470         unsigned long flags;
471         int vrr_active;
472
473         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474
475         if (acrtc) {
476                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477                 drm_dev = acrtc->base.dev;
478                 vblank = &drm_dev->vblank[acrtc->base.index];
479                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480                 frame_duration_ns = vblank->time - previous_timestamp;
481
482                 if (frame_duration_ns > 0) {
483                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
484                                                 frame_duration_ns,
485                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
487                 }
488
489                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
490                               acrtc->crtc_id,
491                               vrr_active);
492
493                 /* Core vblank handling is done here after end of front-porch in
494                  * vrr mode, as vblank timestamping will give valid results
495                  * while now done after front-porch. This will also deliver
496                  * page-flip completion events that have been queued to us
497                  * if a pageflip happened inside front-porch.
498                  */
499                 if (vrr_active) {
500                         drm_crtc_handle_vblank(&acrtc->base);
501
502                         /* BTR processing for pre-DCE12 ASICs */
503                         if (acrtc->dm_irq_params.stream &&
504                             adev->family < AMDGPU_FAMILY_AI) {
505                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506                                 mod_freesync_handle_v_update(
507                                     adev->dm.freesync_module,
508                                     acrtc->dm_irq_params.stream,
509                                     &acrtc->dm_irq_params.vrr_params);
510
511                                 dc_stream_adjust_vmin_vmax(
512                                     adev->dm.dc,
513                                     acrtc->dm_irq_params.stream,
514                                     &acrtc->dm_irq_params.vrr_params.adjust);
515                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
516                         }
517                 }
518         }
519 }
520
521 /**
522  * dm_crtc_high_irq() - Handles CRTC interrupt
523  * @interrupt_params: used for determining the CRTC instance
524  *
525  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
526  * event handler.
527  */
528 static void dm_crtc_high_irq(void *interrupt_params)
529 {
530         struct common_irq_params *irq_params = interrupt_params;
531         struct amdgpu_device *adev = irq_params->adev;
532         struct amdgpu_crtc *acrtc;
533         unsigned long flags;
534         int vrr_active;
535
536         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
537         if (!acrtc)
538                 return;
539
540         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
541
542         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543                       vrr_active, acrtc->dm_irq_params.active_planes);
544
545         /**
546          * Core vblank handling at start of front-porch is only possible
547          * in non-vrr mode, as only there vblank timestamping will give
548          * valid results while done in front-porch. Otherwise defer it
549          * to dm_vupdate_high_irq after end of front-porch.
550          */
551         if (!vrr_active)
552                 drm_crtc_handle_vblank(&acrtc->base);
553
554         /**
555          * Following stuff must happen at start of vblank, for crc
556          * computation and below-the-range btr support in vrr mode.
557          */
558         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
559
560         /* BTR updates need to happen before VUPDATE on Vega and above. */
561         if (adev->family < AMDGPU_FAMILY_AI)
562                 return;
563
564         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
565
566         if (acrtc->dm_irq_params.stream &&
567             acrtc->dm_irq_params.vrr_params.supported &&
568             acrtc->dm_irq_params.freesync_config.state ==
569                     VRR_STATE_ACTIVE_VARIABLE) {
570                 mod_freesync_handle_v_update(adev->dm.freesync_module,
571                                              acrtc->dm_irq_params.stream,
572                                              &acrtc->dm_irq_params.vrr_params);
573
574                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575                                            &acrtc->dm_irq_params.vrr_params.adjust);
576         }
577
578         /*
579          * If there aren't any active_planes then DCH HUBP may be clock-gated.
580          * In that case, pageflip completion interrupts won't fire and pageflip
581          * completion events won't get delivered. Prevent this by sending
582          * pending pageflip events from here if a flip is still pending.
583          *
584          * If any planes are enabled, use dm_pflip_high_irq() instead, to
585          * avoid race conditions between flip programming and completion,
586          * which could cause too early flip completion events.
587          */
588         if (adev->family >= AMDGPU_FAMILY_RV &&
589             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590             acrtc->dm_irq_params.active_planes == 0) {
591                 if (acrtc->event) {
592                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
593                         acrtc->event = NULL;
594                         drm_crtc_vblank_put(&acrtc->base);
595                 }
596                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
597         }
598
599         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 }
601
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
603 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
604 /**
605  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606  * DCN generation ASICs
607  * @interrupt_params: interrupt parameters
608  *
609  * Used to set crc window/read out crc value at vertical line 0 position
610  */
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
612 {
613         struct common_irq_params *irq_params = interrupt_params;
614         struct amdgpu_device *adev = irq_params->adev;
615         struct amdgpu_crtc *acrtc;
616
617         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618
619         if (!acrtc)
620                 return;
621
622         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 }
624 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
625
626 /**
627  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
628  * @adev: amdgpu_device pointer
629  * @notify: dmub notification structure
630  *
631  * Dmub AUX or SET_CONFIG command completion processing callback.
632  * Copies dmub notification to DM which is to be read by the AUX command
633  * issuing thread, and also signals the event to wake up that thread.
634  */
635 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
636 {
637         if (adev->dm.dmub_notify)
638                 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
639         if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
640                 complete(&adev->dm.dmub_aux_transfer_done);
641 }
642
643 /**
644  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
645  * @adev: amdgpu_device pointer
646  * @notify: dmub notification structure
647  *
648  * Dmub Hpd interrupt processing callback. Gets the display index through
649  * the link index and calls the helper to do the processing.
650  */
651 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
652 {
653         struct amdgpu_dm_connector *aconnector;
654         struct amdgpu_dm_connector *hpd_aconnector = NULL;
655         struct drm_connector *connector;
656         struct drm_connector_list_iter iter;
657         struct dc_link *link;
658         uint8_t link_index = 0;
659         struct drm_device *dev = adev->dm.ddev;
660
661         if (adev == NULL)
662                 return;
663
664         if (notify == NULL) {
665                 DRM_ERROR("DMUB HPD callback notification was NULL");
666                 return;
667         }
668
669         if (notify->link_index >= adev->dm.dc->link_count) {
670                 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
671                 return;
672         }
673
674         link_index = notify->link_index;
675         link = adev->dm.dc->links[link_index];
676
677         drm_connector_list_iter_begin(dev, &iter);
678         drm_for_each_connector_iter(connector, &iter) {
679                 aconnector = to_amdgpu_dm_connector(connector);
680                 if (link && aconnector->dc_link == link) {
681                         DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
682                         hpd_aconnector = aconnector;
683                         break;
684                 }
685         }
686         drm_connector_list_iter_end(&iter);
687
688         if (hpd_aconnector) {
689                 if (notify->type == DMUB_NOTIFICATION_HPD)
690                         handle_hpd_irq_helper(hpd_aconnector);
691                 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
692                         handle_hpd_rx_irq(hpd_aconnector);
693         }
694 }
695
696 /**
697  * register_dmub_notify_callback - Sets callback for DMUB notify
698  * @adev: amdgpu_device pointer
699  * @type: Type of dmub notification
700  * @callback: Dmub interrupt callback function
701  * @dmub_int_thread_offload: offload indicator
702  *
703  * API to register a dmub callback handler for a dmub notification.
704  * Also sets an indicator whether callback processing is to be offloaded
705  * to the dmub interrupt handling thread.
706  * Return: true if successfully registered, false for an invalid type or NULL callback
707  */
708 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
709                                    dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
710 {
711         if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
712                 adev->dm.dmub_callback[type] = callback;
713                 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
714         } else
715                 return false;
716
717         return true;
718 }
719
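/*
 * Worker for DMUB HPD notifications that dm_dmub_outbox1_low_irq() queued
 * for processing outside interrupt context; dispatches the notification to
 * the registered callback, then frees the work item.
 */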
720 static void dm_handle_hpd_work(struct work_struct *work)
721 {
722         struct dmub_hpd_work *dmub_hpd_wrk;
723
724         dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
725
726         if (!dmub_hpd_wrk->dmub_notify) {
727                 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
728                 return;
729         }
730
731         if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
732                 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
733                 dmub_hpd_wrk->dmub_notify);
734         }
735
736         kfree(dmub_hpd_wrk->dmub_notify);
737         kfree(dmub_hpd_wrk);
738
739 }
740
741 #define DMUB_TRACE_MAX_READ 64
742 /**
743  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
744  * @interrupt_params: used for determining the Outbox instance
745  *
746  * Handles the Outbox interrupt by draining queued DMUB notifications
747  * and DMUB trace buffer entries.
748  */
749 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
750 {
751         struct dmub_notification notify;
752         struct common_irq_params *irq_params = interrupt_params;
753         struct amdgpu_device *adev = irq_params->adev;
754         struct amdgpu_display_manager *dm = &adev->dm;
755         struct dmcub_trace_buf_entry entry = { 0 };
756         uint32_t count = 0;
757         struct dmub_hpd_work *dmub_hpd_wrk;
758         struct dc_link *plink = NULL;
759
760         if (dc_enable_dmub_notifications(adev->dm.dc) &&
761                 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
762
763                 do {
764                         dc_stat_get_dmub_notification(adev->dm.dc, &notify);
765                         if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
766                                 DRM_ERROR("DM: notify type %d invalid!", notify.type);
767                                 continue;
768                         }
769                         if (!dm->dmub_callback[notify.type]) {
770                                 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
771                                 continue;
772                         }
773                         if (dm->dmub_thread_offload[notify.type] == true) {
774                                 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
775                                 if (!dmub_hpd_wrk) {
776                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk");
777                                         return;
778                                 }
779                                 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
780                                 if (!dmub_hpd_wrk->dmub_notify) {
781                                         kfree(dmub_hpd_wrk);
782                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
783                                         return;
784                                 }
785                                 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
786                                 /* dmub_notify was allocation-checked above; copy unconditionally */
787                                 memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
788                                 dmub_hpd_wrk->adev = adev;
789                                 if (notify.type == DMUB_NOTIFICATION_HPD) {
790                                         plink = adev->dm.dc->links[notify.link_index];
791                                         if (plink) {
792                                                 plink->hpd_status =
793                                                         notify.hpd_status == DP_HPD_PLUG;
794                                         }
795                                 }
796                                 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
797                         } else {
798                                 dm->dmub_callback[notify.type](adev, &notify);
799                         }
800                 } while (notify.pending_notification);
801         }
802
803
804         do {
805                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
806                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
807                                                         entry.param0, entry.param1);
808
809                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
810                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
811                 } else
812                         break;
813
814                 count++;
815
816         } while (count <= DMUB_TRACE_MAX_READ);
817
818         if (count > DMUB_TRACE_MAX_READ)
819                 DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ\n");
820 }
821 #endif /* CONFIG_DRM_AMD_DC_DCN */
822
823 static int dm_set_clockgating_state(void *handle,
824                   enum amd_clockgating_state state)
825 {
826         return 0;
827 }
828
829 static int dm_set_powergating_state(void *handle,
830                   enum amd_powergating_state state)
831 {
832         return 0;
833 }
834
835 /* Prototypes of private functions */
836 static int dm_early_init(void *handle);
837
838 /* Allocate memory for FBC compressed data */
839 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
840 {
841         struct drm_device *dev = connector->dev;
842         struct amdgpu_device *adev = drm_to_adev(dev);
843         struct dm_compressor_info *compressor = &adev->dm.compressor;
844         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
845         struct drm_display_mode *mode;
846         unsigned long max_size = 0;
847
848         if (adev->dm.dc->fbc_compressor == NULL)
849                 return;
850
851         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
852                 return;
853
854         if (compressor->bo_ptr)
855                 return;
856
857
858         list_for_each_entry(mode, &connector->modes, head) {
859                 if (max_size < mode->htotal * mode->vtotal)
860                         max_size = mode->htotal * mode->vtotal;
861         }
862
863         if (max_size) {
864                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
865                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
866                             &compressor->gpu_addr, &compressor->cpu_addr);
867
868                 if (r) {
869                         DRM_ERROR("DM: Failed to initialize FBC\n");
870                 } else {
871                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
872                         DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
873                 }
874
875         }
876
877 }
878
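/*
 * drm_audio_component glue: find the connector bound to the requested
 * audio pin and copy out its ELD (EDID-Like Data) so the HDA side can
 * learn the display's audio capabilities.
 */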
879 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
880                                           int pipe, bool *enabled,
881                                           unsigned char *buf, int max_bytes)
882 {
883         struct drm_device *dev = dev_get_drvdata(kdev);
884         struct amdgpu_device *adev = drm_to_adev(dev);
885         struct drm_connector *connector;
886         struct drm_connector_list_iter conn_iter;
887         struct amdgpu_dm_connector *aconnector;
888         int ret = 0;
889
890         *enabled = false;
891
892         mutex_lock(&adev->dm.audio_lock);
893
894         drm_connector_list_iter_begin(dev, &conn_iter);
895         drm_for_each_connector_iter(connector, &conn_iter) {
896                 aconnector = to_amdgpu_dm_connector(connector);
897                 if (aconnector->audio_inst != port)
898                         continue;
899
900                 *enabled = true;
901                 ret = drm_eld_size(connector->eld);
902                 memcpy(buf, connector->eld, min(max_bytes, ret));
903
904                 break;
905         }
906         drm_connector_list_iter_end(&conn_iter);
907
908         mutex_unlock(&adev->dm.audio_lock);
909
910         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
911
912         return ret;
913 }
914
915 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
916         .get_eld = amdgpu_dm_audio_component_get_eld,
917 };
918
919 static int amdgpu_dm_audio_component_bind(struct device *kdev,
920                                        struct device *hda_kdev, void *data)
921 {
922         struct drm_device *dev = dev_get_drvdata(kdev);
923         struct amdgpu_device *adev = drm_to_adev(dev);
924         struct drm_audio_component *acomp = data;
925
926         acomp->ops = &amdgpu_dm_audio_component_ops;
927         acomp->dev = kdev;
928         adev->dm.audio_component = acomp;
929
930         return 0;
931 }
932
933 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
934                                           struct device *hda_kdev, void *data)
935 {
936         struct drm_device *dev = dev_get_drvdata(kdev);
937         struct amdgpu_device *adev = drm_to_adev(dev);
938         struct drm_audio_component *acomp = data;
939
940         acomp->ops = NULL;
941         acomp->dev = NULL;
942         adev->dm.audio_component = NULL;
943 }
944
945 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
946         .bind   = amdgpu_dm_audio_component_bind,
947         .unbind = amdgpu_dm_audio_component_unbind,
948 };
949
950 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
951 {
952         int i, ret;
953
954         if (!amdgpu_audio)
955                 return 0;
956
957         adev->mode_info.audio.enabled = true;
958
959         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
960
961         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
962                 adev->mode_info.audio.pin[i].channels = -1;
963                 adev->mode_info.audio.pin[i].rate = -1;
964                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
965                 adev->mode_info.audio.pin[i].status_bits = 0;
966                 adev->mode_info.audio.pin[i].category_code = 0;
967                 adev->mode_info.audio.pin[i].connected = false;
968                 adev->mode_info.audio.pin[i].id =
969                         adev->dm.dc->res_pool->audios[i]->inst;
970                 adev->mode_info.audio.pin[i].offset = 0;
971         }
972
973         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
974         if (ret < 0)
975                 return ret;
976
977         adev->dm.audio_registered = true;
978
979         return 0;
980 }
981
982 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
983 {
984         if (!amdgpu_audio)
985                 return;
986
987         if (!adev->mode_info.audio.enabled)
988                 return;
989
990         if (adev->dm.audio_registered) {
991                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
992                 adev->dm.audio_registered = false;
993         }
994
995         /* TODO: Disable audio? */
996
997         adev->mode_info.audio.enabled = false;
998 }
999
1000 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1001 {
1002         struct drm_audio_component *acomp = adev->dm.audio_component;
1003
1004         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1005                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1006
1007                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1008                                                  pin, -1);
1009         }
1010 }
1011
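/*
 * Initialize DMUB hardware: verify HW support, copy the firmware
 * instruction/data regions and the VBIOS into the reserved framebuffer
 * windows, program hardware parameters, then wait for firmware auto-load
 * and attach the DMUB service to the DC context.
 */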
1012 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1013 {
1014         const struct dmcub_firmware_header_v1_0 *hdr;
1015         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1016         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1017         const struct firmware *dmub_fw = adev->dm.dmub_fw;
1018         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1019         struct abm *abm = adev->dm.dc->res_pool->abm;
1020         struct dmub_srv_hw_params hw_params;
1021         enum dmub_status status;
1022         const unsigned char *fw_inst_const, *fw_bss_data;
1023         uint32_t i, fw_inst_const_size, fw_bss_data_size;
1024         bool has_hw_support;
1025         struct dc *dc = adev->dm.dc;
1026
1027         if (!dmub_srv)
1028                 /* DMUB isn't supported on the ASIC. */
1029                 return 0;
1030
1031         if (!fb_info) {
1032                 DRM_ERROR("No framebuffer info for DMUB service.\n");
1033                 return -EINVAL;
1034         }
1035
1036         if (!dmub_fw) {
1037                 /* Firmware required for DMUB support. */
1038                 DRM_ERROR("No firmware provided for DMUB.\n");
1039                 return -EINVAL;
1040         }
1041
1042         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1043         if (status != DMUB_STATUS_OK) {
1044                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1045                 return -EINVAL;
1046         }
1047
1048         if (!has_hw_support) {
1049                 DRM_INFO("DMUB unsupported on ASIC\n");
1050                 return 0;
1051         }
1052
1053         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1054
1055         fw_inst_const = dmub_fw->data +
1056                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1057                         PSP_HEADER_BYTES;
1058
1059         fw_bss_data = dmub_fw->data +
1060                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1061                       le32_to_cpu(hdr->inst_const_bytes);
1062
1063         /* Copy firmware and bios info into FB memory. */
1064         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1065                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1066
1067         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1068
1069         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1070          * amdgpu_ucode_init_single_fw will load dmub firmware
1071          * fw_inst_const part to cw0; otherwise, the firmware back door load
1072          * will be done by dm_dmub_hw_init
1073          */
1074         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1075                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1076                                 fw_inst_const_size);
1077         }
1078
1079         if (fw_bss_data_size)
1080                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1081                        fw_bss_data, fw_bss_data_size);
1082
1083         /* Copy firmware bios info into FB memory. */
1084         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1085                adev->bios_size);
1086
1087         /* Reset regions that need to be reset. */
1088         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1089         fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1090
1091         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1092                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1093
1094         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1095                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1096
1097         /* Initialize hardware. */
1098         memset(&hw_params, 0, sizeof(hw_params));
1099         hw_params.fb_base = adev->gmc.fb_start;
1100         hw_params.fb_offset = adev->gmc.aper_base;
1101
1102         /* backdoor load firmware and trigger dmub running */
1103         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1104                 hw_params.load_inst_const = true;
1105
1106         if (dmcu)
1107                 hw_params.psp_version = dmcu->psp_version;
1108
1109         for (i = 0; i < fb_info->num_fb; ++i)
1110                 hw_params.fb[i] = &fb_info->fb[i];
1111
1112         switch (adev->asic_type) {
1113         case CHIP_YELLOW_CARP:
1114                 if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1115                         hw_params.dpia_supported = true;
1116 #if defined(CONFIG_DRM_AMD_DC_DCN)
1117                         hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1118 #endif
1119                 }
1120                 break;
1121         default:
1122                 break;
1123         }
1124
1125         status = dmub_srv_hw_init(dmub_srv, &hw_params);
1126         if (status != DMUB_STATUS_OK) {
1127                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1128                 return -EINVAL;
1129         }
1130
1131         /* Wait for firmware load to finish. */
1132         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1133         if (status != DMUB_STATUS_OK)
1134                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1135
1136         /* Init DMCU and ABM if available. */
1137         if (dmcu && abm) {
1138                 dmcu->funcs->dmcu_init(dmcu);
1139                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1140         }
1141
1142         if (!adev->dm.dc->ctx->dmub_srv)
1143                 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1144         if (!adev->dm.dc->ctx->dmub_srv) {
1145                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1146                 return -ENOMEM;
1147         }
1148
1149         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1150                  adev->dm.dmcub_fw_version);
1151
1152         return 0;
1153 }
1154
1155 #if defined(CONFIG_DRM_AMD_DC_DCN)
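/*
 * Build the DC physical address space config from the GMC view of memory:
 * the system aperture spans the framebuffer and AGP apertures (encoded in
 * 256KB units, i.e. >> 18, with AGP bounds in 16MB units, i.e. >> 24), and
 * the GART page table range feeds the gart_config fields.
 */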
1156 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1157 {
1158         uint64_t pt_base;
1159         uint32_t logical_addr_low;
1160         uint32_t logical_addr_high;
1161         uint32_t agp_base, agp_bot, agp_top;
1162         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1163
1164         memset(pa_config, 0, sizeof(*pa_config));
1165
1166         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1167         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1168
1169         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1170                 /*
1171                  * Raven2 has a HW issue that it is unable to use the vram which
1172                  * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1173                  * workaround that increase system aperture high address (add 1)
1174                  * to get rid of the VM fault and hardware hang.
1175                  */
1176                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1177         else
1178                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1179
1180         agp_base = 0;
1181         agp_bot = adev->gmc.agp_start >> 24;
1182         agp_top = adev->gmc.agp_end >> 24;
1183
1184
1185         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1186         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1187         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1188         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1189         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1190         page_table_base.low_part = lower_32_bits(pt_base);
1191
1192         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1193         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1194
1195         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1196         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1197         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1198
1199         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1200         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1201         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1202
1203         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1204         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1205         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1206
1207         pa_config->is_hvm_enabled = 0;
1208
1209 }
1210 #endif
1211 #if defined(CONFIG_DRM_AMD_DC_DCN)
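/*
 * Deferred vblank enable/disable work: maintains a count of CRTCs with
 * vblank IRQs enabled so that idle optimizations (MALL) are only allowed
 * while the count is zero, and toggles PSR according to the vblank
 * requirements coming from the OS.
 */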
1212 static void vblank_control_worker(struct work_struct *work)
1213 {
1214         struct vblank_control_work *vblank_work =
1215                 container_of(work, struct vblank_control_work, work);
1216         struct amdgpu_display_manager *dm = vblank_work->dm;
1217
1218         mutex_lock(&dm->dc_lock);
1219
1220         if (vblank_work->enable)
1221                 dm->active_vblank_irq_count++;
1222         else if (dm->active_vblank_irq_count)
1223                 dm->active_vblank_irq_count--;
1224
1225         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1226
1227         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1228
1229         /* Control PSR based on vblank requirements from OS */
1230         if (vblank_work->stream && vblank_work->stream->link) {
1231                 if (vblank_work->enable) {
1232                         if (vblank_work->stream->link->psr_settings.psr_allow_active)
1233                                 amdgpu_dm_psr_disable(vblank_work->stream);
1234                 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1235                            !vblank_work->stream->link->psr_settings.psr_allow_active &&
1236                            vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1237                         amdgpu_dm_psr_enable(vblank_work->stream);
1238                 }
1239         }
1240
1241         mutex_unlock(&dm->dc_lock);
1242
1243         dc_stream_release(vblank_work->stream);
1244
1245         kfree(vblank_work);
1246 }
1247
1248 #endif
1249
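/*
 * Offloaded HPD RX handler: re-runs sink detection and, unless the sink is
 * gone or a GPU reset is in progress, handles automated-test requests or
 * recovers from link loss while holding the DC lock.
 */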
1250 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1251 {
1252         struct hpd_rx_irq_offload_work *offload_work;
1253         struct amdgpu_dm_connector *aconnector;
1254         struct dc_link *dc_link;
1255         struct amdgpu_device *adev;
1256         enum dc_connection_type new_connection_type = dc_connection_none;
1257         unsigned long flags;
1258
1259         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1260         aconnector = offload_work->offload_wq->aconnector;
1261
1262         if (!aconnector) {
1263                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1264                 goto skip;
1265         }
1266
1267         adev = drm_to_adev(aconnector->base.dev);
1268         dc_link = aconnector->dc_link;
1269
1270         mutex_lock(&aconnector->hpd_lock);
1271         if (!dc_link_detect_sink(dc_link, &new_connection_type))
1272                 DRM_ERROR("KMS: Failed to detect connector\n");
1273         mutex_unlock(&aconnector->hpd_lock);
1274
1275         if (new_connection_type == dc_connection_none)
1276                 goto skip;
1277
1278         if (amdgpu_in_reset(adev))
1279                 goto skip;
1280
1281         mutex_lock(&adev->dm.dc_lock);
1282         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1283                 dc_link_dp_handle_automated_test(dc_link);
1284         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1285                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1286                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1287                 dc_link_dp_handle_link_loss(dc_link);
1288                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1289                 offload_work->offload_wq->is_handling_link_loss = false;
1290                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1291         }
1292         mutex_unlock(&adev->dm.dc_lock);
1293
1294 skip:
1295         kfree(offload_work);
1296
1297 }
1298
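/*
 * Allocate one single-threaded offload workqueue per link (up to
 * dc->caps.max_links) so HPD RX interrupt handling can be deferred out of
 * interrupt context on a per-link basis.
 */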
1299 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1300 {
1301         int max_caps = dc->caps.max_links;
1302         int i = 0;
1303         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1304
1305         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1306
1307         if (!hpd_rx_offload_wq)
1308                 return NULL;
1309
1310
1311         for (i = 0; i < max_caps; i++) {
1312                 hpd_rx_offload_wq[i].wq =
1313                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1314
1315                 if (hpd_rx_offload_wq[i].wq == NULL) {
1316                         DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1317                         return NULL;
1318                 }
1319
1320                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1321         }
1322
1323         return hpd_rx_offload_wq;
1324 }
1325
1326 struct amdgpu_stutter_quirk {
1327         u16 chip_vendor;
1328         u16 chip_device;
1329         u16 subsys_vendor;
1330         u16 subsys_device;
1331         u8 revision;
1332 };
1333
1334 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1335         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1336         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1337         { 0, 0, 0, 0, 0 },
1338 };
1339
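/*
 * Match the device's PCI vendor/device/subsystem IDs and revision against
 * the quirk table above; a hit means memory stutter must be disabled on
 * this board.
 */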
1340 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1341 {
1342         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1343
1344         while (p && p->chip_device != 0) {
1345                 if (pdev->vendor == p->chip_vendor &&
1346                     pdev->device == p->chip_device &&
1347                     pdev->subsystem_vendor == p->subsys_vendor &&
1348                     pdev->subsystem_device == p->subsys_device &&
1349                     pdev->revision == p->revision) {
1350                         return true;
1351                 }
1352                 ++p;
1353         }
1354         return false;
1355 }
1356
1357 static int amdgpu_dm_init(struct amdgpu_device *adev)
1358 {
1359         struct dc_init_data init_data;
1360 #ifdef CONFIG_DRM_AMD_DC_HDCP
1361         struct dc_callback_init init_params;
1362 #endif
1363         int r;
1364
1365         adev->dm.ddev = adev_to_drm(adev);
1366         adev->dm.adev = adev;
1367
1368         /* Zero all the fields */
1369         memset(&init_data, 0, sizeof(init_data));
1370 #ifdef CONFIG_DRM_AMD_DC_HDCP
1371         memset(&init_params, 0, sizeof(init_params));
1372 #endif
1373
1374         mutex_init(&adev->dm.dc_lock);
1375         mutex_init(&adev->dm.audio_lock);
1376 #if defined(CONFIG_DRM_AMD_DC_DCN)
1377         spin_lock_init(&adev->dm.vblank_lock);
1378 #endif
1379
1380         if (amdgpu_dm_irq_init(adev)) {
1381                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1382                 goto error;
1383         }
1384
1385         init_data.asic_id.chip_family = adev->family;
1386
1387         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1388         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1389         init_data.asic_id.chip_id = adev->pdev->device;
1390
1391         init_data.asic_id.vram_width = adev->gmc.vram_width;
1392         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1393         init_data.asic_id.atombios_base_address =
1394                 adev->mode_info.atom_context->bios;
1395
1396         init_data.driver = adev;
1397
1398         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1399
1400         if (!adev->dm.cgs_device) {
1401                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1402                 goto error;
1403         }
1404
1405         init_data.cgs_device = adev->dm.cgs_device;
1406
1407         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1408
1409         switch (adev->asic_type) {
1410         case CHIP_CARRIZO:
1411         case CHIP_STONEY:
1412                 init_data.flags.gpu_vm_support = true;
1413                 break;
1414         default:
1415                 switch (adev->ip_versions[DCE_HWIP][0]) {
1416                 case IP_VERSION(2, 1, 0):
1417                         init_data.flags.gpu_vm_support = true;
1418                         switch (adev->dm.dmcub_fw_version) {
1419                         case 0: /* development */
1420                         case 0x1: /* linux-firmware.git hash 6d9f399 */
1421                         case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1422                                 init_data.flags.disable_dmcu = false;
1423                                 break;
1424                         default:
1425                                 init_data.flags.disable_dmcu = true;
1426                         }
1427                         break;
1428                 case IP_VERSION(1, 0, 0):
1429                 case IP_VERSION(1, 0, 1):
1430                 case IP_VERSION(3, 0, 1):
1431                 case IP_VERSION(3, 1, 2):
1432                 case IP_VERSION(3, 1, 3):
1433                         init_data.flags.gpu_vm_support = true;
1434                         break;
1435                 case IP_VERSION(2, 0, 3):
1436                         init_data.flags.disable_dmcu = true;
1437                         break;
1438                 default:
1439                         break;
1440                 }
1441                 break;
1442         }
1443
1444         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1445                 init_data.flags.fbc_support = true;
1446
1447         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1448                 init_data.flags.multi_mon_pp_mclk_switch = true;
1449
1450         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1451                 init_data.flags.disable_fractional_pwm = true;
1452
1453         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1454                 init_data.flags.edp_no_power_sequencing = true;
1455
1456         init_data.flags.power_down_display_on_boot = true;
1457
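        /* Seamless boot keeps the display that VBIOS/GOP already lit alive
         * through driver init: the firmware-programmed mode is taken over
         * rather than power-cycled.
         */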
1458         if (check_seamless_boot_capability(adev)) {
1459                 init_data.flags.power_down_display_on_boot = false;
1460                 init_data.flags.allow_seamless_boot_optimization = true;
1461                 DRM_INFO("Seamless boot condition check passed\n");
1462         }
1463
1464         INIT_LIST_HEAD(&adev->dm.da_list);
1465         /* Display Core create. */
1466         adev->dm.dc = dc_create(&init_data);
1467
1468         if (adev->dm.dc) {
1469                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1470         } else {
1471                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1472                 goto error;
1473         }
1474
1475         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1476                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1477                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1478         }
1479
1480         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1481                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1482         if (dm_should_disable_stutter(adev->pdev))
1483                 adev->dm.dc->debug.disable_stutter = true;
1484
1485         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1486                 adev->dm.dc->debug.disable_stutter = true;
1487
1488         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1489                 adev->dm.dc->debug.disable_dsc = true;
1490                 adev->dm.dc->debug.disable_dsc_edp = true;
1491         }
1492
1493         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1494                 adev->dm.dc->debug.disable_clock_gate = true;
1495
1496         r = dm_dmub_hw_init(adev);
1497         if (r) {
1498                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1499                 goto error;
1500         }
1501
1502         dc_hardware_init(adev->dm.dc);
1503
1504         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1505         if (!adev->dm.hpd_rx_offload_wq) {
1506                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1507                 goto error;
1508         }
1509
1510 #if defined(CONFIG_DRM_AMD_DC_DCN)
1511         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1512                 struct dc_phy_addr_space_config pa_config;
1513
1514                 mmhub_read_system_context(adev, &pa_config);
1515
1516                 // Call the DC init_memory func
1517                 dc_setup_system_context(adev->dm.dc, &pa_config);
1518         }
1519 #endif
1520
1521         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1522         if (!adev->dm.freesync_module) {
1523                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1524         } else {
1525                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1526                                  adev->dm.freesync_module);
1527         }
1528
1529         amdgpu_dm_init_color_mod();
1530
1531 #if defined(CONFIG_DRM_AMD_DC_DCN)
1532         if (adev->dm.dc->caps.max_links > 0) {
1533                 adev->dm.vblank_control_workqueue =
1534                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1535                 if (!adev->dm.vblank_control_workqueue)
1536                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1537         }
1538 #endif
1539
1540 #ifdef CONFIG_DRM_AMD_DC_HDCP
1541         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1542                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1543
1544                 if (!adev->dm.hdcp_workqueue)
1545                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1546                 else
1547                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1548
1549                 dc_init_callbacks(adev->dm.dc, &init_params);
1550         }
1551 #endif
1552 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1553         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1554 #endif
1555         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1556                 init_completion(&adev->dm.dmub_aux_transfer_done);
1557                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1558                 if (!adev->dm.dmub_notify) {
1559                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1560                         goto error;
1561                 }
1562
1563                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1564                 if (!adev->dm.delayed_hpd_wq) {
1565                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1566                         goto error;
1567                 }
1568
1569                 amdgpu_dm_outbox_init(adev);
1570 #if defined(CONFIG_DRM_AMD_DC_DCN)
1571                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1572                         dmub_aux_setconfig_callback, false)) {
1573                         DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1574                         goto error;
1575                 }
1576                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1577                         DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1578                         goto error;
1579                 }
1580                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1581                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1582                         goto error;
1583                 }
1584 #endif /* CONFIG_DRM_AMD_DC_DCN */
1585         }
1586
1587         if (amdgpu_dm_initialize_drm_device(adev)) {
1588                 DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1589                 goto error;
1590         }
1592
1593         /* create fake encoders for MST */
1594         dm_dp_create_fake_mst_encoders(adev);
1595
1596         /* TODO: Add_display_info? */
1597
1598         /* TODO use dynamic cursor width */
1599         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1600         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1601
1602         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1603                 DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
1604                 goto error;
1605         }
1606 
1609         DRM_DEBUG_DRIVER("KMS initialized.\n");
1610
1611         return 0;
1612 error:
1613         amdgpu_dm_fini(adev);
1614
1615         return -EINVAL;
1616 }
1617
1618 static int amdgpu_dm_early_fini(void *handle)
1619 {
1620         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1621
1622         amdgpu_dm_audio_fini(adev);
1623
1624         return 0;
1625 }
1626
1627 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1628 {
1629         int i;
1630
1631 #if defined(CONFIG_DRM_AMD_DC_DCN)
1632         if (adev->dm.vblank_control_workqueue) {
1633                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1634                 adev->dm.vblank_control_workqueue = NULL;
1635         }
1636 #endif
1637
1638         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1639                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1640         }
1641
1642         amdgpu_dm_destroy_drm_device(&adev->dm);
1643
1644 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1645         if (adev->dm.crc_rd_wrk) {
1646                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1647                 kfree(adev->dm.crc_rd_wrk);
1648                 adev->dm.crc_rd_wrk = NULL;
1649         }
1650 #endif
1651 #ifdef CONFIG_DRM_AMD_DC_HDCP
1652         if (adev->dm.hdcp_workqueue) {
1653                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1654                 adev->dm.hdcp_workqueue = NULL;
1655         }
1656
1657         if (adev->dm.dc)
1658                 dc_deinit_callbacks(adev->dm.dc);
1659 #endif
1660 
1661         if (adev->dm.dc)
1662                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1663 
1664         if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1665                 kfree(adev->dm.dmub_notify);
1666                 adev->dm.dmub_notify = NULL;
1667                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1668                 adev->dm.delayed_hpd_wq = NULL;
1669         }
1670         if (adev->dm.dmub_bo)
1671                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1672                                       &adev->dm.dmub_bo_gpu_addr,
1673                                       &adev->dm.dmub_bo_cpu_addr);
1674
1675         if (adev->dm.hpd_rx_offload_wq) {
1676                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1677                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1678                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1679                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1680                         }
1681                 }
1682
1683                 kfree(adev->dm.hpd_rx_offload_wq);
1684                 adev->dm.hpd_rx_offload_wq = NULL;
1685         }
1686
1687         /* DC Destroy TODO: Replace destroy DAL */
1688         if (adev->dm.dc)
1689                 dc_destroy(&adev->dm.dc);
1690         /*
1691          * TODO: pageflip, vlank interrupt
1692          *
1693          * amdgpu_dm_irq_fini(adev);
1694          */
1695
1696         if (adev->dm.cgs_device) {
1697                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1698                 adev->dm.cgs_device = NULL;
1699         }
1700         if (adev->dm.freesync_module) {
1701                 mod_freesync_destroy(adev->dm.freesync_module);
1702                 adev->dm.freesync_module = NULL;
1703         }
1704
1705         mutex_destroy(&adev->dm.audio_lock);
1706         mutex_destroy(&adev->dm.dc_lock);
1709 }
1710
1711 static int load_dmcu_fw(struct amdgpu_device *adev)
1712 {
1713         const char *fw_name_dmcu = NULL;
1714         int r;
1715         const struct dmcu_firmware_header_v1_0 *hdr;
1716
1717         switch (adev->asic_type) {
1718 #if defined(CONFIG_DRM_AMD_DC_SI)
1719         case CHIP_TAHITI:
1720         case CHIP_PITCAIRN:
1721         case CHIP_VERDE:
1722         case CHIP_OLAND:
1723 #endif
1724         case CHIP_BONAIRE:
1725         case CHIP_HAWAII:
1726         case CHIP_KAVERI:
1727         case CHIP_KABINI:
1728         case CHIP_MULLINS:
1729         case CHIP_TONGA:
1730         case CHIP_FIJI:
1731         case CHIP_CARRIZO:
1732         case CHIP_STONEY:
1733         case CHIP_POLARIS11:
1734         case CHIP_POLARIS10:
1735         case CHIP_POLARIS12:
1736         case CHIP_VEGAM:
1737         case CHIP_VEGA10:
1738         case CHIP_VEGA12:
1739         case CHIP_VEGA20:
1740                 return 0;
1741         case CHIP_NAVI12:
1742                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1743                 break;
1744         case CHIP_RAVEN:
1745                 /* Picasso and Raven2 share the Raven DMCU firmware. */
1746                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1747                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1748                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1749                 else
1750                         return 0;
1751                 break;
1752         default:
1753                 switch (adev->ip_versions[DCE_HWIP][0]) {
1754                 case IP_VERSION(2, 0, 2):
1755                 case IP_VERSION(2, 0, 3):
1756                 case IP_VERSION(2, 0, 0):
1757                 case IP_VERSION(2, 1, 0):
1758                 case IP_VERSION(3, 0, 0):
1759                 case IP_VERSION(3, 0, 2):
1760                 case IP_VERSION(3, 0, 3):
1761                 case IP_VERSION(3, 0, 1):
1762                 case IP_VERSION(3, 1, 2):
1763                 case IP_VERSION(3, 1, 3):
1764                         return 0;
1765                 default:
1766                         break;
1767                 }
1768                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1769                 return -EINVAL;
1770         }
1771
1772         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1773                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1774                 return 0;
1775         }
1776
1777         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1778         if (r == -ENOENT) {
1779                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1780                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1781                 adev->dm.fw_dmcu = NULL;
1782                 return 0;
1783         }
1784         if (r) {
1785                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1786                         fw_name_dmcu);
1787                 return r;
1788         }
1789
1790         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1791         if (r) {
1792                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1793                         fw_name_dmcu);
1794                 release_firmware(adev->dm.fw_dmcu);
1795                 adev->dm.fw_dmcu = NULL;
1796                 return r;
1797         }
1798
1799         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1800         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1801         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1802         adev->firmware.fw_size +=
1803                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1804
1805         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1806         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1807         adev->firmware.fw_size +=
1808                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1809
1810         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1811
1812         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1813
1814         return 0;
1815 }
1816
1817 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1818 {
1819         struct amdgpu_device *adev = ctx;
1820
1821         return dm_read_reg(adev->dm.dc->ctx, address);
1822 }
1823
1824 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1825                                      uint32_t value)
1826 {
1827         struct amdgpu_device *adev = ctx;
1828
1829         return dm_write_reg(adev->dm.dc->ctx, address, value);
1830 }
1831
1832 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1833 {
1834         struct dmub_srv_create_params create_params;
1835         struct dmub_srv_region_params region_params;
1836         struct dmub_srv_region_info region_info;
1837         struct dmub_srv_fb_params fb_params;
1838         struct dmub_srv_fb_info *fb_info;
1839         struct dmub_srv *dmub_srv;
1840         const struct dmcub_firmware_header_v1_0 *hdr;
1841         const char *fw_name_dmub;
1842         enum dmub_asic dmub_asic;
1843         enum dmub_status status;
1844         int r;
1845
1846         switch (adev->ip_versions[DCE_HWIP][0]) {
1847         case IP_VERSION(2, 1, 0):
1848                 dmub_asic = DMUB_ASIC_DCN21;
1849                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1850                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1851                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1852                 break;
1853         case IP_VERSION(3, 0, 0):
1854                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1855                         dmub_asic = DMUB_ASIC_DCN30;
1856                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1857                 } else {
1858                         dmub_asic = DMUB_ASIC_DCN30;
1859                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1860                 }
1861                 break;
1862         case IP_VERSION(3, 0, 1):
1863                 dmub_asic = DMUB_ASIC_DCN301;
1864                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1865                 break;
1866         case IP_VERSION(3, 0, 2):
1867                 dmub_asic = DMUB_ASIC_DCN302;
1868                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1869                 break;
1870         case IP_VERSION(3, 0, 3):
1871                 dmub_asic = DMUB_ASIC_DCN303;
1872                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1873                 break;
1874         case IP_VERSION(3, 1, 2):
1875         case IP_VERSION(3, 1, 3):
1876                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1877                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1878                 break;
1879
1880         default:
1881                 /* ASIC doesn't support DMUB. */
1882                 return 0;
1883         }
1884
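        /* A missing DMUB firmware is not fatal: returning 0 below leaves
         * adev->dm.dmub_srv unallocated, so later init treats the ASIC as if
         * DMUB were unsupported.
         */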
1885         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1886         if (r) {
1887                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1888                 return 0;
1889         }
1890
1891         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1892         if (r) {
1893                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1894                 return 0;
1895         }
1896
1897         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1898         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1899
1900         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1901                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1902                         AMDGPU_UCODE_ID_DMCUB;
1903                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1904                         adev->dm.dmub_fw;
1905                 adev->firmware.fw_size +=
1906                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1907
1908                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1909                          adev->dm.dmcub_fw_version);
1910         }
1911 
1913         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1914         dmub_srv = adev->dm.dmub_srv;
1915
1916         if (!dmub_srv) {
1917                 DRM_ERROR("Failed to allocate DMUB service!\n");
1918                 return -ENOMEM;
1919         }
1920
1921         memset(&create_params, 0, sizeof(create_params));
1922         create_params.user_ctx = adev;
1923         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1924         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1925         create_params.asic = dmub_asic;
1926
1927         /* Create the DMUB service. */
1928         status = dmub_srv_create(dmub_srv, &create_params);
1929         if (status != DMUB_STATUS_OK) {
1930                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1931                 return -EINVAL;
1932         }
1933
1934         /* Calculate the size of all the regions for the DMUB service. */
1935         memset(&region_params, 0, sizeof(region_params));
1936
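        /* The firmware image is laid out as
         *   [PSP header][instruction/constant data][bss/data][PSP footer],
         * so the PSP wrapper bytes are stripped from the instruction region
         * size and skipped when locating the instruction data below.
         */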
1937         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1938                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1939         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1940         region_params.vbios_size = adev->bios_size;
1941         region_params.fw_bss_data = region_params.bss_data_size ?
1942                 adev->dm.dmub_fw->data +
1943                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1944                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1945         region_params.fw_inst_const =
1946                 adev->dm.dmub_fw->data +
1947                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1948                 PSP_HEADER_BYTES;
1949
1950         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1951                                            &region_info);
1952
1953         if (status != DMUB_STATUS_OK) {
1954                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1955                 return -EINVAL;
1956         }
1957
1958         /*
1959          * Allocate a framebuffer based on the total size of all the regions.
1960          * TODO: Move this into GART.
1961          */
1962         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1963                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1964                                     &adev->dm.dmub_bo_gpu_addr,
1965                                     &adev->dm.dmub_bo_cpu_addr);
1966         if (r)
1967                 return r;
1968
1969         /* Rebase the regions on the framebuffer address. */
1970         memset(&fb_params, 0, sizeof(fb_params));
1971         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1972         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1973         fb_params.region_info = &region_info;
1974
1975         adev->dm.dmub_fb_info =
1976                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1977         fb_info = adev->dm.dmub_fb_info;
1978
1979         if (!fb_info) {
1980                 DRM_ERROR(
1981                         "Failed to allocate framebuffer info for DMUB service!\n");
1982                 return -ENOMEM;
1983         }
1984
1985         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1986         if (status != DMUB_STATUS_OK) {
1987                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1988                 return -EINVAL;
1989         }
1990
1991         return 0;
1992 }
1993
1994 static int dm_sw_init(void *handle)
1995 {
1996         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1997         int r;
1998
1999         r = dm_dmub_sw_init(adev);
2000         if (r)
2001                 return r;
2002
2003         return load_dmcu_fw(adev);
2004 }
2005
2006 static int dm_sw_fini(void *handle)
2007 {
2008         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2009
2010         kfree(adev->dm.dmub_fb_info);
2011         adev->dm.dmub_fb_info = NULL;
2012
2013         if (adev->dm.dmub_srv) {
2014                 dmub_srv_destroy(adev->dm.dmub_srv);
2015                 adev->dm.dmub_srv = NULL;
2016         }
2017
2018         release_firmware(adev->dm.dmub_fw);
2019         adev->dm.dmub_fw = NULL;
2020
2021         release_firmware(adev->dm.fw_dmcu);
2022         adev->dm.fw_dmcu = NULL;
2023
2024         return 0;
2025 }
2026
2027 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2028 {
2029         struct amdgpu_dm_connector *aconnector;
2030         struct drm_connector *connector;
2031         struct drm_connector_list_iter iter;
2032         int ret = 0;
2033
2034         drm_connector_list_iter_begin(dev, &iter);
2035         drm_for_each_connector_iter(connector, &iter) {
2036                 aconnector = to_amdgpu_dm_connector(connector);
2037                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2038                     aconnector->mst_mgr.aux) {
2039                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2040                                          aconnector,
2041                                          aconnector->base.base.id);
2042
2043                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2044                         if (ret < 0) {
2045                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2046                                 aconnector->dc_link->type =
2047                                         dc_connection_single;
2048                                 break;
2049                         }
2050                 }
2051         }
2052         drm_connector_list_iter_end(&iter);
2053
2054         return ret;
2055 }
2056
2057 static int dm_late_init(void *handle)
2058 {
2059         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2060         struct dmcu_iram_parameters params;
2061         unsigned int linear_lut[16];
2062         int i;
2063         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
2064 
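        /* Build a 16-point linear ramp over the full 16-bit backlight range:
         * linear_lut = { 0x0000, 0x1111, 0x2222, ..., 0xFFFF }.
         */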
2068         for (i = 0; i < 16; i++)
2069                 linear_lut[i] = 0xFFFF * i / 15;
2070
2071         params.set = 0;
2072         params.backlight_ramping_override = false;
2073         params.backlight_ramping_start = 0xCCCC;
2074         params.backlight_ramping_reduction = 0xCCCCCCCC;
2075         params.backlight_lut_array_size = 16;
2076         params.backlight_lut_array = linear_lut;
2077
2078         /* Min backlight level after ABM reduction; don't allow it to fall
2079          * below 1% of full scale: 0xFFFF * 0.01 = 655.35 ~= 0x28F (655).
2080          */
2081         params.min_abm_backlight = 0x28F;
2082         /* In the case where ABM is implemented on dmcub,
2083          * the dmcu object will be NULL.
2084          * ABM 2.4 and up are implemented on dmcub.
2085          */
2086         if (dmcu) {
2087                 if (!dmcu_load_iram(dmcu, params))
2088                         return -EINVAL;
2089         } else if (adev->dm.dc->ctx->dmub_srv) {
2090                 struct dc_link *edp_links[MAX_NUM_EDP];
2091                 int edp_num;
2092
2093                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2094                 for (i = 0; i < edp_num; i++) {
2095                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2096                                 return -EINVAL;
2097                 }
2098         }
2099
2100         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2101 }
2102
2103 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2104 {
2105         struct amdgpu_dm_connector *aconnector;
2106         struct drm_connector *connector;
2107         struct drm_connector_list_iter iter;
2108         struct drm_dp_mst_topology_mgr *mgr;
2109         int ret;
2110         bool need_hotplug = false;
2111
2112         drm_connector_list_iter_begin(dev, &iter);
2113         drm_for_each_connector_iter(connector, &iter) {
2114                 aconnector = to_amdgpu_dm_connector(connector);
2115                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2116                     aconnector->mst_port)
2117                         continue;
2118
2119                 mgr = &aconnector->mst_mgr;
2120
2121                 if (suspend) {
2122                         drm_dp_mst_topology_mgr_suspend(mgr);
2123                 } else {
2124                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
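                        /* If the topology fails to resume, tear down MST on
                         * this connector and schedule a full hotplug re-probe
                         * after the loop.
                         */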
2125                         if (ret < 0) {
2126                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2127                                 need_hotplug = true;
2128                         }
2129                 }
2130         }
2131         drm_connector_list_iter_end(&iter);
2132
2133         if (need_hotplug)
2134                 drm_kms_helper_hotplug_event(dev);
2135 }
2136
2137 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2138 {
2139         struct smu_context *smu = &adev->smu;
2140         int ret = 0;
2141
2142         if (!is_support_sw_smu(adev))
2143                 return 0;
2144
2145         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2146          * depends on the Windows driver's dc implementation.
2147          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
2148          * settings should be passed to smu during boot up and resume from s3.
2149          * boot up: dc calculates the dcn watermark clock settings within
2150          * dc_create, dcn20_resource_construct,
2151          * then calls the pplib functions below to pass the settings to smu:
2152          * smu_set_watermarks_for_clock_ranges
2153          * smu_set_watermarks_table
2154          * navi10_set_watermarks_table
2155          * smu_write_watermarks_table
2156          *
2157          * For Renoir, the clock settings of the dcn watermarks are also fixed
2158          * values. dc implements a different flow for the Windows driver:
2159          * dc_hardware_init / dc_set_power_state
2160          * dcn10_init_hw
2161          * notify_wm_ranges
2162          * set_wm_ranges
2163          * -- Linux
2164          * smu_set_watermarks_for_clock_ranges
2165          * renoir_set_watermarks_table
2166          * smu_write_watermarks_table
2167          *
2168          * For Linux,
2169          * dc_hardware_init -> amdgpu_dm_init
2170          * dc_set_power_state --> dm_resume
2171          *
2172          * Therefore, this function applies to Navi10/12/14 but not to
2173          * Renoir.
2174          */
2175         switch (adev->ip_versions[DCE_HWIP][0]) {
2176         case IP_VERSION(2, 0, 2):
2177         case IP_VERSION(2, 0, 0):
2178                 break;
2179         default:
2180                 return 0;
2181         }
2182
2183         ret = smu_write_watermarks_table(smu);
2184         if (ret) {
2185                 DRM_ERROR("Failed to update WMTABLE!\n");
2186                 return ret;
2187         }
2188
2189         return 0;
2190 }
2191
2192 /**
2193  * dm_hw_init() - Initialize DC device
2194  * @handle: The base driver device containing the amdgpu_dm device.
2195  *
2196  * Initialize the &struct amdgpu_display_manager device. This involves calling
2197  * the initializers of each DM component, then populating the struct with them.
2198  *
2199  * Although the function implies hardware initialization, both hardware and
2200  * software are initialized here. Splitting them out to their relevant init
2201  * hooks is a future TODO item.
2202  *
2203  * Some notable things that are initialized here:
2204  *
2205  * - Display Core, both software and hardware
2206  * - DC modules that we need (freesync and color management)
2207  * - DRM software states
2208  * - Interrupt sources and handlers
2209  * - Vblank support
2210  * - Debug FS entries, if enabled
2211  */
2212 static int dm_hw_init(void *handle)
2213 {
2214         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2215         /* Create DAL display manager; propagate init failures to the caller */
2216         if (amdgpu_dm_init(adev))
2217                 return -EINVAL;
2218         amdgpu_dm_hpd_init(adev);
2219         return 0;
2220 }
2221
2222 /**
2223  * dm_hw_fini() - Teardown DC device
2224  * @handle: The base driver device containing the amdgpu_dm device.
2225  *
2226  * Teardown components within &struct amdgpu_display_manager that require
2227  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2228  * were loaded. Also flush IRQ workqueues and disable them.
2229  */
2230 static int dm_hw_fini(void *handle)
2231 {
2232         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2233
2234         amdgpu_dm_hpd_fini(adev);
2235
2236         amdgpu_dm_irq_fini(adev);
2237         amdgpu_dm_fini(adev);
2238         return 0;
2239 }
2240 
2242 static int dm_enable_vblank(struct drm_crtc *crtc);
2243 static void dm_disable_vblank(struct drm_crtc *crtc);
2244
2245 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2246                                  struct dc_state *state, bool enable)
2247 {
2248         enum dc_irq_source irq_source;
2249         struct amdgpu_crtc *acrtc;
2250         int rc = -EBUSY;
2251         int i = 0;
2252
2253         for (i = 0; i < state->stream_count; i++) {
2254                 acrtc = get_crtc_by_otg_inst(
2255                                 adev, state->stream_status[i].primary_otg_inst);
2256
2257                 if (acrtc && state->stream_status[i].plane_count != 0) {
2258                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2259                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2260                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2261                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2262                         if (rc)
2263                                 DRM_WARN("Failed to %s pflip interrupts\n",
2264                                          enable ? "enable" : "disable");
2265
2266                         if (enable) {
2267                                 rc = dm_enable_vblank(&acrtc->base);
2268                                 if (rc)
2269                                         DRM_WARN("Failed to enable vblank interrupts\n");
2270                         } else {
2271                                 dm_disable_vblank(&acrtc->base);
2272                         }
2273
2275                 }
2276         }
2277 }
2279 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2280 {
2281         struct dc_state *context = NULL;
2282         enum dc_status res = DC_ERROR_UNEXPECTED;
2283         int i;
2284         struct dc_stream_state *del_streams[MAX_PIPES];
2285         int del_streams_count = 0;
2286
2287         memset(del_streams, 0, sizeof(del_streams));
2288
2289         context = dc_create_state(dc);
2290         if (context == NULL)
2291                 goto context_alloc_fail;
2292
2293         dc_resource_state_copy_construct_current(dc, context);
2294
2295         /* First remove from context all streams */
2296         for (i = 0; i < context->stream_count; i++) {
2297                 struct dc_stream_state *stream = context->streams[i];
2298
2299                 del_streams[del_streams_count++] = stream;
2300         }
2301
2302         /* Remove all planes for removed streams and then remove the streams */
2303         for (i = 0; i < del_streams_count; i++) {
2304                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2305                         res = DC_FAIL_DETACH_SURFACES;
2306                         goto fail;
2307                 }
2308
2309                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2310                 if (res != DC_OK)
2311                         goto fail;
2312         }
2313
2314         res = dc_commit_state(dc, context);
2315
2316 fail:
2317         dc_release_state(context);
2318
2319 context_alloc_fail:
2320         return res;
2321 }
2322
2323 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2324 {
2325         int i;
2326
2327         if (dm->hpd_rx_offload_wq) {
2328                 for (i = 0; i < dm->dc->caps.max_links; i++)
2329                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2330         }
2331 }
2332
2333 static int dm_suspend(void *handle)
2334 {
2335         struct amdgpu_device *adev = handle;
2336         struct amdgpu_display_manager *dm = &adev->dm;
2337         int ret = 0;
2338
2339         if (amdgpu_in_reset(adev)) {
2340                 mutex_lock(&dm->dc_lock);
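                /*
                 * Note: dc_lock is intentionally left held across the reset;
                 * the matching mutex_unlock() is in the amdgpu_in_reset()
                 * branch of dm_resume().
                 */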
2341
2342 #if defined(CONFIG_DRM_AMD_DC_DCN)
2343                 dc_allow_idle_optimizations(adev->dm.dc, false);
2344 #endif
2345
2346                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2347
2348                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2349
2350                 amdgpu_dm_commit_zero_streams(dm->dc);
2351
2352                 amdgpu_dm_irq_suspend(adev);
2353
2354                 hpd_rx_irq_work_suspend(dm);
2355
2356                 return ret;
2357         }
2358
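        /* A leftover cached_state means the previous resume never consumed it. */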
2359         WARN_ON(adev->dm.cached_state);
2360         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2361
2362         s3_handle_mst(adev_to_drm(adev), true);
2363
2364         amdgpu_dm_irq_suspend(adev);
2365
2366         hpd_rx_irq_work_suspend(dm);
2367
2368         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2369
2370         return 0;
2371 }
2372
2373 static struct amdgpu_dm_connector *
2374 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2375                                              struct drm_crtc *crtc)
2376 {
2377         uint32_t i;
2378         struct drm_connector_state *new_con_state;
2379         struct drm_connector *connector;
2380         struct drm_crtc *crtc_from_state;
2381
2382         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2383                 crtc_from_state = new_con_state->crtc;
2384
2385                 if (crtc_from_state == crtc)
2386                         return to_amdgpu_dm_connector(connector);
2387         }
2388
2389         return NULL;
2390 }
2391
2392 static void emulated_link_detect(struct dc_link *link)
2393 {
2394         struct dc_sink_init_data sink_init_data = { 0 };
2395         struct display_sink_capability sink_caps = { 0 };
2396         enum dc_edid_status edid_status;
2397         struct dc_context *dc_ctx = link->ctx;
2398         struct dc_sink *sink = NULL;
2399         struct dc_sink *prev_sink = NULL;
2400
2401         link->type = dc_connection_none;
2402         prev_sink = link->local_sink;
2403
2404         if (prev_sink)
2405                 dc_sink_release(prev_sink);
2406
2407         switch (link->connector_signal) {
2408         case SIGNAL_TYPE_HDMI_TYPE_A: {
2409                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2410                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2411                 break;
2412         }
2413
2414         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2415                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2416                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2417                 break;
2418         }
2419
2420         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2421                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2422                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2423                 break;
2424         }
2425
2426         case SIGNAL_TYPE_LVDS: {
2427                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2428                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2429                 break;
2430         }
2431
2432         case SIGNAL_TYPE_EDP: {
2433                 sink_caps.transaction_type =
2434                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2435                 sink_caps.signal = SIGNAL_TYPE_EDP;
2436                 break;
2437         }
2438
2439         case SIGNAL_TYPE_DISPLAY_PORT: {
2440                 sink_caps.transaction_type =
2441                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
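                /* Emulated DP sinks are exposed with a virtual signal type. */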
2442                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2443                 break;
2444         }
2445
2446         default:
2447                 DC_ERROR("Invalid connector type! signal:%d\n",
2448                         link->connector_signal);
2449                 return;
2450         }
2451
2452         sink_init_data.link = link;
2453         sink_init_data.sink_signal = sink_caps.signal;
2454
2455         sink = dc_sink_create(&sink_init_data);
2456         if (!sink) {
2457                 DC_ERROR("Failed to create sink!\n");
2458                 return;
2459         }
2460
2461         /* dc_sink_create returns a new reference */
2462         link->local_sink = sink;
2463
2464         edid_status = dm_helpers_read_local_edid(
2465                         link->ctx,
2466                         link,
2467                         sink);
2468
2469         if (edid_status != EDID_OK)
2470                 DC_ERROR("Failed to read EDID\n");
2471 }
2473
2474 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2475                                      struct amdgpu_display_manager *dm)
2476 {
2477         struct {
2478                 struct dc_surface_update surface_updates[MAX_SURFACES];
2479                 struct dc_plane_info plane_infos[MAX_SURFACES];
2480                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2481                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2482                 struct dc_stream_update stream_update;
2483         } *bundle;
2484         int k, m;
2485
2486         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2487
2488         if (!bundle) {
2489                 dm_error("Failed to allocate update bundle\n");
2490                 goto cleanup;
2491         }
2492
2493         for (k = 0; k < dc_state->stream_count; k++) {
2494                 bundle->stream_update.stream = dc_state->streams[k];
2495 
2496                 /* Index stream_status by k so each stream gets its own planes. */
2497                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2498                         bundle->surface_updates[m].surface =
2499                                 dc_state->stream_status[k].plane_states[m];
2500                         bundle->surface_updates[m].surface->force_full_update = true;
2501                 }
2502                 dc_commit_updates_for_stream(
2503                         dm->dc, bundle->surface_updates,
2504                         dc_state->stream_status[k].plane_count,
2505                         dc_state->streams[k], &bundle->stream_update, dc_state);
2506         }
2507
2508 cleanup:
2509         kfree(bundle);
2512 }
2513
2514 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2515 {
2516         struct dc_stream_state *stream_state;
2517         struct amdgpu_dm_connector *aconnector = link->priv;
2518         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2519         struct dc_stream_update stream_update;
2520         bool dpms_off = true;
2521
2522         memset(&stream_update, 0, sizeof(stream_update));
2523         stream_update.dpms_off = &dpms_off;
2524
2525         mutex_lock(&adev->dm.dc_lock);
2526         stream_state = dc_stream_find_from_link(link);
2527
2528         if (stream_state == NULL) {
2529                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2530                 mutex_unlock(&adev->dm.dc_lock);
2531                 return;
2532         }
2533
2534         stream_update.stream = stream_state;
2535         acrtc_state->force_dpms_off = true;
2536         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2537                                      stream_state, &stream_update,
2538                                      stream_state->ctx->dc->current_state);
2539         mutex_unlock(&adev->dm.dc_lock);
2540 }
2541
2542 static int dm_resume(void *handle)
2543 {
2544         struct amdgpu_device *adev = handle;
2545         struct drm_device *ddev = adev_to_drm(adev);
2546         struct amdgpu_display_manager *dm = &adev->dm;
2547         struct amdgpu_dm_connector *aconnector;
2548         struct drm_connector *connector;
2549         struct drm_connector_list_iter iter;
2550         struct drm_crtc *crtc;
2551         struct drm_crtc_state *new_crtc_state;
2552         struct dm_crtc_state *dm_new_crtc_state;
2553         struct drm_plane *plane;
2554         struct drm_plane_state *new_plane_state;
2555         struct dm_plane_state *dm_new_plane_state;
2556         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2557         enum dc_connection_type new_connection_type = dc_connection_none;
2558         struct dc_state *dc_state;
2559         int i, r, j;
2560
2561         if (amdgpu_in_reset(adev)) {
2562                 dc_state = dm->cached_dc_state;
2563
2564                 /*
2565                  * The dc->current_state is backed up into dm->cached_dc_state
2566                  * before we commit 0 streams.
2567                  *
2568                  * DC will clear link encoder assignments on the real state
2569                  * but the changes won't propagate over to the copy we made
2570                  * before the 0 streams commit.
2571                  *
2572                  * DC expects that link encoder assignments are *not* valid
2573                  * when committing a state, so as a workaround it needs to be
2574                  * cleared here.
2575                  */
2576                 link_enc_cfg_init(dm->dc, dc_state);
2577
2578                 if (dc_enable_dmub_notifications(adev->dm.dc))
2579                         amdgpu_dm_outbox_init(adev);
2580
2581                 r = dm_dmub_hw_init(adev);
2582                 if (r)
2583                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2584
2585                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2586                 dc_resume(dm->dc);
2587
2588                 amdgpu_dm_irq_resume_early(adev);
2589
2590                 for (i = 0; i < dc_state->stream_count; i++) {
2591                         dc_state->streams[i]->mode_changed = true;
2592                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2593                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2594                                         = 0xffffffff;
2595                         }
2596                 }
2597
2598                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2599
2600                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2601
2602                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2603
2604                 dc_release_state(dm->cached_dc_state);
2605                 dm->cached_dc_state = NULL;
2606
2607                 amdgpu_dm_irq_resume_late(adev);
2608
2609                 mutex_unlock(&dm->dc_lock);
2610
2611                 return 0;
2612         }
2613         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2614         dc_release_state(dm_state->context);
2615         dm_state->context = dc_create_state(dm->dc);
2616         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2617         dc_resource_state_construct(dm->dc, dm_state->context);
2618
2619         /* Re-enable outbox interrupts for DPIA. */
2620         if (dc_enable_dmub_notifications(adev->dm.dc))
2621                 amdgpu_dm_outbox_init(adev);
2622
2623         /* Before powering on DC we need to re-initialize DMUB. */
2624         r = dm_dmub_hw_init(adev);
2625         if (r)
2626                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2627
2628         /* power on hardware */
2629         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2630
2631         /* program HPD filter */
2632         dc_resume(dm->dc);
2633
2634         /*
2635          * early enable HPD Rx IRQ, should be done before set mode as short
2636          * pulse interrupts are used for MST
2637          */
2638         amdgpu_dm_irq_resume_early(adev);
2639
2640         /* On resume we need to rewrite the MSTM control bits to enable MST */
2641         s3_handle_mst(ddev, false);
2642 
2643         /* Do detection */
2644         drm_connector_list_iter_begin(ddev, &iter);
2645         drm_for_each_connector_iter(connector, &iter) {
2646                 aconnector = to_amdgpu_dm_connector(connector);
2647
2648                 /*
2649                  * Skip already-created MST connectors; they are handled by
2650                  * the MST topology manager.
2651                  */
2652                 if (aconnector->mst_port)
2653                         continue;
2654
2655                 mutex_lock(&aconnector->hpd_lock);
2656                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2657                         DRM_ERROR("KMS: Failed to detect connector\n");
2658
2659                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2660                         emulated_link_detect(aconnector->dc_link);
2661                 else
2662                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2663
2664                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2665                         aconnector->fake_enable = false;
2666
2667                 if (aconnector->dc_sink)
2668                         dc_sink_release(aconnector->dc_sink);
2669                 aconnector->dc_sink = NULL;
2670                 amdgpu_dm_update_connector_after_detect(aconnector);
2671                 mutex_unlock(&aconnector->hpd_lock);
2672         }
2673         drm_connector_list_iter_end(&iter);
2674
2675         /* Force mode set in atomic commit */
2676         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2677                 new_crtc_state->active_changed = true;
2678
2679         /*
2680          * atomic_check is expected to create the dc states. We need to release
2681          * them here, since they were duplicated as part of the suspend
2682          * procedure.
2683          */
2684         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2685                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2686                 if (dm_new_crtc_state->stream) {
2687                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2688                         dc_stream_release(dm_new_crtc_state->stream);
2689                         dm_new_crtc_state->stream = NULL;
2690                 }
2691         }
2692
2693         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2694                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2695                 if (dm_new_plane_state->dc_state) {
2696                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2697                         dc_plane_state_release(dm_new_plane_state->dc_state);
2698                         dm_new_plane_state->dc_state = NULL;
2699                 }
2700         }
2701
2702         drm_atomic_helper_resume(ddev, dm->cached_state);
2703
2704         dm->cached_state = NULL;
2705
2706         amdgpu_dm_irq_resume_late(adev);
2707
2708         amdgpu_dm_smu_write_watermarks_table(adev);
2709
2710         return 0;
2711 }
2712
2713 /**
2714  * DOC: DM Lifecycle
2715  *
2716  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2717  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2718  * the base driver's device list to be initialized and torn down accordingly.
2719  *
2720  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2721  */
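/*
 * For context, a sketch (not code from this file) of how the block is wired
 * up: the SoC init code registers DM with the base driver via
 *
 *     amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which the base driver invokes the amd_ip_funcs hooks below at the
 * matching points of the device lifecycle.
 */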
2722
2723 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2724         .name = "dm",
2725         .early_init = dm_early_init,
2726         .late_init = dm_late_init,
2727         .sw_init = dm_sw_init,
2728         .sw_fini = dm_sw_fini,
2729         .early_fini = amdgpu_dm_early_fini,
2730         .hw_init = dm_hw_init,
2731         .hw_fini = dm_hw_fini,
2732         .suspend = dm_suspend,
2733         .resume = dm_resume,
2734         .is_idle = dm_is_idle,
2735         .wait_for_idle = dm_wait_for_idle,
2736         .check_soft_reset = dm_check_soft_reset,
2737         .soft_reset = dm_soft_reset,
2738         .set_clockgating_state = dm_set_clockgating_state,
2739         .set_powergating_state = dm_set_powergating_state,
2740 };
2741
2742 const struct amdgpu_ip_block_version dm_ip_block =
2743 {
2744         .type = AMD_IP_BLOCK_TYPE_DCE,
2745         .major = 1,
2746         .minor = 0,
2747         .rev = 0,
2748         .funcs = &amdgpu_dm_funcs,
2749 };
2750 
2752 /**
2753  * DOC: atomic
2754  *
2755  * *WIP*
2756  */
2757
2758 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2759         .fb_create = amdgpu_display_user_framebuffer_create,
2760         .get_format_info = amd_get_format_info,
2761         .output_poll_changed = drm_fb_helper_output_poll_changed,
2762         .atomic_check = amdgpu_dm_atomic_check,
2763         .atomic_commit = drm_atomic_helper_commit,
2764 };
2765
2766 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2767         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2768 };
2769
2770 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2771 {
2772         u32 max_cll, min_cll, max, min, q, r;
2773         struct amdgpu_dm_backlight_caps *caps;
2774         struct amdgpu_display_manager *dm;
2775         struct drm_connector *conn_base;
2776         struct amdgpu_device *adev;
2777         struct dc_link *link = NULL;
2778         static const u8 pre_computed_values[] = {
2779                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2780                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2781         int i;
2782
2783         if (!aconnector || !aconnector->dc_link)
2784                 return;
2785
2786         link = aconnector->dc_link;
2787         if (link->connector_signal != SIGNAL_TYPE_EDP)
2788                 return;
2789
2790         conn_base = &aconnector->base;
2791         adev = drm_to_adev(conn_base->dev);
2792         dm = &adev->dm;
2793         for (i = 0; i < dm->num_of_edps; i++) {
2794                 if (link == dm->backlight_link[i])
2795                         break;
2796         }
2797         if (i >= dm->num_of_edps)
2798                 return;
2799         caps = &dm->backlight_caps[i];
2800         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2801         caps->aux_support = false;
2802         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2803         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2804
2805         if (caps->ext_caps->bits.oled == 1 /*||
2806             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2807             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2808                 caps->aux_support = true;
2809
2810         if (amdgpu_backlight == 0)
2811                 caps->aux_support = false;
2812         else if (amdgpu_backlight == 1)
2813                 caps->aux_support = true;
2814
2815         /* From the specification (CTA-861-G), the maximum luminance is
2816          * calculated as:
2817          *      Luminance = 50*2**(CV/32)
2818          * where CV is a one-byte value.
2819          * Evaluating this expression directly would require floating-point
2820          * precision; to avoid that complexity, we exploit the fact that CV
2821          * is divided by a constant. By Euclid's division algorithm, CV can
2822          * be written as CV = 32*q + r. Substituting this into the Luminance
2823          * expression gives 50*(2**q)*(2**(r/32)), so only the 32 possible
2824          * values of 50*2**(r/32) need to be pre-computed. The table was
2825          * generated with the following Ruby one-liner:
2826          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2827          * and the results can be verified against the entries of
2828          * pre_computed_values.
2829          */
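        /*
         * Worked example (values chosen for illustration): for max_cll = 65,
         * q = 65 >> 5 = 2 and r = 65 % 32 = 1, so max becomes
         * (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, which matches
         * round(50 * 2**(65/32.0)) = 204.
         */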
2830         q = max_cll >> 5;
2831         r = max_cll % 32;
2832         max = (1 << q) * pre_computed_values[r];
2833
2834         // min luminance: maxLum * (CV/255)^2 / 100
2835         q = DIV_ROUND_CLOSEST(min_cll, 255);
2836         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2837
2838         caps->aux_max_input_signal = max;
2839         caps->aux_min_input_signal = min;
2840 }
2841
2842 void amdgpu_dm_update_connector_after_detect(
2843                 struct amdgpu_dm_connector *aconnector)
2844 {
2845         struct drm_connector *connector = &aconnector->base;
2846         struct drm_device *dev = connector->dev;
2847         struct dc_sink *sink;
2848
2849         /* MST handled by drm_mst framework */
2850         if (aconnector->mst_mgr.mst_state == true)
2851                 return;
2852
2853         sink = aconnector->dc_link->local_sink;
2854         if (sink)
2855                 dc_sink_retain(sink);
2856
2857         /*
2858          * An EDID-managed connector gets its first update only in the mode_valid
2859          * hook; the connector sink is then set to either the fake or the physical
2860          * sink, depending on link status. Skip if this was already done during boot.
2861          */
2862         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2863                         && aconnector->dc_em_sink) {
2864
2865                 /*
2866                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2867                  * fake a stream, because connector->sink is set to NULL on resume.
2868                  */
2869                 mutex_lock(&dev->mode_config.mutex);
2870
2871                 if (sink) {
2872                         if (aconnector->dc_sink) {
2873                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2874                                 /*
2875                                  * The retain/release below bump the sink's refcount:
2876                                  * the link no longer points to it after disconnect,
2877                                  * so the next crtc-to-connector reshuffle by the UMD
2878                                  * would otherwise trigger an unwanted dc_sink release.
2879                                  */
2880                                 dc_sink_release(aconnector->dc_sink);
2881                         }
2882                         aconnector->dc_sink = sink;
2883                         dc_sink_retain(aconnector->dc_sink);
2884                         amdgpu_dm_update_freesync_caps(connector,
2885                                         aconnector->edid);
2886                 } else {
2887                         amdgpu_dm_update_freesync_caps(connector, NULL);
2888                         if (!aconnector->dc_sink) {
2889                                 aconnector->dc_sink = aconnector->dc_em_sink;
2890                                 dc_sink_retain(aconnector->dc_sink);
2891                         }
2892                 }
2893
2894                 mutex_unlock(&dev->mode_config.mutex);
2895
2896                 if (sink)
2897                         dc_sink_release(sink);
2898                 return;
2899         }
2900
2901         /*
2902          * TODO: temporary guard until a proper fix is found;
2903          * if this sink is an MST sink, we should not do anything.
2904          */
2905         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2906                 dc_sink_release(sink);
2907                 return;
2908         }
2909
2910         if (aconnector->dc_sink == sink) {
2911                 /*
2912                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2913                  * Do nothing!!
2914                  */
2915                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2916                                 aconnector->connector_id);
2917                 if (sink)
2918                         dc_sink_release(sink);
2919                 return;
2920         }
2921
2922         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2923                 aconnector->connector_id, aconnector->dc_sink, sink);
2924
2925         mutex_lock(&dev->mode_config.mutex);
2926
2927         /*
2928          * 1. Update status of the drm connector
2929          * 2. Send an event and let userspace tell us what to do
2930          */
2931         if (sink) {
2932                 /*
2933                  * TODO: check if we still need the S3 mode update workaround.
2934                  * If yes, put it here.
2935                  */
2936                 if (aconnector->dc_sink) {
2937                         amdgpu_dm_update_freesync_caps(connector, NULL);
2938                         dc_sink_release(aconnector->dc_sink);
2939                 }
2940
2941                 aconnector->dc_sink = sink;
2942                 dc_sink_retain(aconnector->dc_sink);
2943                 if (sink->dc_edid.length == 0) {
2944                         aconnector->edid = NULL;
2945                         if (aconnector->dc_link->aux_mode) {
2946                                 drm_dp_cec_unset_edid(
2947                                         &aconnector->dm_dp_aux.aux);
2948                         }
2949                 } else {
2950                         aconnector->edid =
2951                                 (struct edid *)sink->dc_edid.raw_edid;
2952
2953                         drm_connector_update_edid_property(connector,
2954                                                            aconnector->edid);
2955                         if (aconnector->dc_link->aux_mode)
2956                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2957                                                     aconnector->edid);
2958                 }
2959
2960                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2961                 update_connector_ext_caps(aconnector);
2962         } else {
2963                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2964                 amdgpu_dm_update_freesync_caps(connector, NULL);
2965                 drm_connector_update_edid_property(connector, NULL);
2966                 aconnector->num_modes = 0;
2967                 dc_sink_release(aconnector->dc_sink);
2968                 aconnector->dc_sink = NULL;
2969                 aconnector->edid = NULL;
2970 #ifdef CONFIG_DRM_AMD_DC_HDCP
2971                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2972                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2973                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2974 #endif
2975         }
2976
2977         mutex_unlock(&dev->mode_config.mutex);
2978
2979         update_subconnector_property(aconnector);
2980
2981         if (sink)
2982                 dc_sink_release(sink);
2983 }
2984
2985 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2986 {
2987         struct drm_connector *connector = &aconnector->base;
2988         struct drm_device *dev = connector->dev;
2989         enum dc_connection_type new_connection_type = dc_connection_none;
2990         struct amdgpu_device *adev = drm_to_adev(dev);
2991         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2992         struct dm_crtc_state *dm_crtc_state = NULL;
2993
2994         if (adev->dm.disable_hpd_irq)
2995                 return;
2996
2997         if (dm_con_state->base.state && dm_con_state->base.crtc)
2998                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2999                                         dm_con_state->base.state,
3000                                         dm_con_state->base.crtc));
3001         /*
3002          * On failure, or for MST, there is no need to update the connector status
3003          * or notify the OS, since (in the MST case) MST does this in its own context.
3004          */
3005         mutex_lock(&aconnector->hpd_lock);
3006
3007 #ifdef CONFIG_DRM_AMD_DC_HDCP
3008         if (adev->dm.hdcp_workqueue) {
3009                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3010                 dm_con_state->update_hdcp = true;
3011         }
3012 #endif
3013         if (aconnector->fake_enable)
3014                 aconnector->fake_enable = false;
3015
3016         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3017                 DRM_ERROR("KMS: Failed to detect connector\n");
3018
3019         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3020                 emulated_link_detect(aconnector->dc_link);
3021
3022                 drm_modeset_lock_all(dev);
3023                 dm_restore_drm_connector_state(dev, connector);
3024                 drm_modeset_unlock_all(dev);
3025
3026                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3027                         drm_kms_helper_connector_hotplug_event(connector);
3028
3029         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3030                 if (new_connection_type == dc_connection_none &&
3031                     aconnector->dc_link->type == dc_connection_none &&
3032                     dm_crtc_state)
3033                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3034
3035                 amdgpu_dm_update_connector_after_detect(aconnector);
3036
3037                 drm_modeset_lock_all(dev);
3038                 dm_restore_drm_connector_state(dev, connector);
3039                 drm_modeset_unlock_all(dev);
3040
3041                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3042                         drm_kms_helper_connector_hotplug_event(connector);
3043         }
3044         mutex_unlock(&aconnector->hpd_lock);
3045
3046 }
3047
3048 static void handle_hpd_irq(void *param)
3049 {
3050         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3051
3052         handle_hpd_irq_helper(aconnector);
3053
3054 }
3055
3056 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3057 {
3058         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3059         uint8_t dret;
3060         bool new_irq_handled = false;
3061         int dpcd_addr;
3062         int dpcd_bytes_to_read;
3063
3064         const int max_process_count = 30;
3065         int process_count = 0;
3066
3067         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3068
3069         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3070                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3071                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3072                 dpcd_addr = DP_SINK_COUNT;
3073         } else {
3074                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3075                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3076                 dpcd_addr = DP_SINK_COUNT_ESI;
3077         }
3078
3079         dret = drm_dp_dpcd_read(
3080                 &aconnector->dm_dp_aux.aux,
3081                 dpcd_addr,
3082                 esi,
3083                 dpcd_bytes_to_read);
3084
3085         while (dret == dpcd_bytes_to_read &&
3086                 process_count < max_process_count) {
3087                 uint8_t retry;
3088                 dret = 0;
3089
3090                 process_count++;
3091
3092                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3093                 /* handle HPD short pulse irq */
3094                 if (aconnector->mst_mgr.mst_state)
3095                         drm_dp_mst_hpd_irq(
3096                                 &aconnector->mst_mgr,
3097                                 esi,
3098                                 &new_irq_handled);
3099
3100                 if (new_irq_handled) {
3101                         /* ACK at DPCD to notify downstream */
3102                         const int ack_dpcd_bytes_to_write =
3103                                 dpcd_bytes_to_read - 1;
3104
3105                         for (retry = 0; retry < 3; retry++) {
3106                                 uint8_t wret;
3107
3108                                 wret = drm_dp_dpcd_write(
3109                                         &aconnector->dm_dp_aux.aux,
3110                                         dpcd_addr + 1,
3111                                         &esi[1],
3112                                         ack_dpcd_bytes_to_write);
3113                                 if (wret == ack_dpcd_bytes_to_write)
3114                                         break;
3115                         }
3116
3117                         /* check if there is a new irq to be handled */
3118                         dret = drm_dp_dpcd_read(
3119                                 &aconnector->dm_dp_aux.aux,
3120                                 dpcd_addr,
3121                                 esi,
3122                                 dpcd_bytes_to_read);
3123
3124                         new_irq_handled = false;
3125                 } else {
3126                         break;
3127                 }
3128         }
3129
3130         if (process_count == max_process_count)
3131                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3132 }
3133
3134 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3135                                                         union hpd_irq_data hpd_irq_data)
3136 {
3137         struct hpd_rx_irq_offload_work *offload_work =
3138                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3139
3140         if (!offload_work) {
3141                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3142                 return;
3143         }
3144
3145         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3146         offload_work->data = hpd_irq_data;
3147         offload_work->offload_wq = offload_wq;
3148
3149         queue_work(offload_wq->wq, &offload_work->work);
3150         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3151 }
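/*
 * For reference, a sketch of how the queued handler recovers its context
 * (the standard workqueue pattern; dm_handle_hpd_rx_offload_work itself is
 * defined earlier in this file, and its exact body is not reproduced here):
 *
 *      static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 *      {
 *              struct hpd_rx_irq_offload_work *offload_work =
 *                      container_of(work, struct hpd_rx_irq_offload_work, work);
 *              ...
 *      }
 */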
3152
3153 static void handle_hpd_rx_irq(void *param)
3154 {
3155         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3156         struct drm_connector *connector = &aconnector->base;
3157         struct drm_device *dev = connector->dev;
3158         struct dc_link *dc_link = aconnector->dc_link;
3159         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3160         bool result = false;
3161         enum dc_connection_type new_connection_type = dc_connection_none;
3162         struct amdgpu_device *adev = drm_to_adev(dev);
3163         union hpd_irq_data hpd_irq_data;
3164         bool link_loss = false;
3165         bool has_left_work = false;
3166         int idx = aconnector->base.index;
3167         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3168
3169         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3170
3171         if (adev->dm.disable_hpd_irq)
3172                 return;
3173
3174         /*
3175          * TODO: Temporarily take a mutex so that the hpd interrupt does not
3176          * race on the gpio; once an i2c helper is implemented, this mutex
3177          * should be retired.
3178          */
3179         mutex_lock(&aconnector->hpd_lock);
3180
3181         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3182                                                 &link_loss, true, &has_left_work);
3183
3184         if (!has_left_work)
3185                 goto out;
3186
3187         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3188                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3189                 goto out;
3190         }
3191
3192         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3193                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3194                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3195                         dm_handle_mst_sideband_msg(aconnector);
3196                         goto out;
3197                 }
3198
3199                 if (link_loss) {
3200                         bool skip = false;
3201
3202                         spin_lock(&offload_wq->offload_lock);
3203                         skip = offload_wq->is_handling_link_loss;
3204
3205                         if (!skip)
3206                                 offload_wq->is_handling_link_loss = true;
3207
3208                         spin_unlock(&offload_wq->offload_lock);
3209
3210                         if (!skip)
3211                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3212
3213                         goto out;
3214                 }
3215         }
3216
3217 out:
3218         if (result && !is_mst_root_connector) {
3219                 /* Downstream Port status changed. */
3220                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3221                         DRM_ERROR("KMS: Failed to detect connector\n");
3222
3223                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3224                         emulated_link_detect(dc_link);
3225
3226                         if (aconnector->fake_enable)
3227                                 aconnector->fake_enable = false;
3228
3229                         amdgpu_dm_update_connector_after_detect(aconnector);
3230
3231
3232                         drm_modeset_lock_all(dev);
3233                         dm_restore_drm_connector_state(dev, connector);
3234                         drm_modeset_unlock_all(dev);
3235
3236                         drm_kms_helper_connector_hotplug_event(connector);
3237                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3238
3239                         if (aconnector->fake_enable)
3240                                 aconnector->fake_enable = false;
3241
3242                         amdgpu_dm_update_connector_after_detect(aconnector);
3243
3244
3245                         drm_modeset_lock_all(dev);
3246                         dm_restore_drm_connector_state(dev, connector);
3247                         drm_modeset_unlock_all(dev);
3248
3249                         drm_kms_helper_connector_hotplug_event(connector);
3250                 }
3251         }
3252 #ifdef CONFIG_DRM_AMD_DC_HDCP
3253         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3254                 if (adev->dm.hdcp_workqueue)
3255                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3256         }
3257 #endif
3258
3259         if (dc_link->type != dc_connection_mst_branch)
3260                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3261
3262         mutex_unlock(&aconnector->hpd_lock);
3263 }
3264
3265 static void register_hpd_handlers(struct amdgpu_device *adev)
3266 {
3267         struct drm_device *dev = adev_to_drm(adev);
3268         struct drm_connector *connector;
3269         struct amdgpu_dm_connector *aconnector;
3270         const struct dc_link *dc_link;
3271         struct dc_interrupt_params int_params = {0};
3272
3273         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3274         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3275
3276         list_for_each_entry(connector,
3277                         &dev->mode_config.connector_list, head) {
3278
3279                 aconnector = to_amdgpu_dm_connector(connector);
3280                 dc_link = aconnector->dc_link;
3281
3282                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3283                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3284                         int_params.irq_source = dc_link->irq_source_hpd;
3285
3286                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3287                                         handle_hpd_irq,
3288                                         (void *) aconnector);
3289                 }
3290
3291                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3292
3293                         /* Also register for DP short pulse (hpd_rx). */
3294                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3295                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3296
3297                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3298                                         handle_hpd_rx_irq,
3299                                         (void *) aconnector);
3300
3301                         if (adev->dm.hpd_rx_offload_wq)
3302                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3303                                         aconnector;
3304                 }
3305         }
3306 }
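/*
 * Note: the per-connector hpd_rx offload queue is keyed by connector->index
 * here, which pairs with the aconnector->base.index lookup performed in
 * handle_hpd_rx_irq() above, so each DP connector is serviced by its own
 * offload work queue entry.
 */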
3307
3308 #if defined(CONFIG_DRM_AMD_DC_SI)
3309 /* Register IRQ sources and initialize IRQ callbacks */
3310 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3311 {
3312         struct dc *dc = adev->dm.dc;
3313         struct common_irq_params *c_irq_params;
3314         struct dc_interrupt_params int_params = {0};
3315         int r;
3316         int i;
3317         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3318
3319         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3320         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3321
3322         /*
3323          * Actions of amdgpu_irq_add_id():
3324          * 1. Register a set() function with base driver.
3325          *    Base driver will call set() function to enable/disable an
3326          *    interrupt in DC hardware.
3327          * 2. Register amdgpu_dm_irq_handler().
3328          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3329          *    coming from DC hardware.
3330          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3331          *    for acknowledging and handling. */
3332
3333         /* Use VBLANK interrupt */
3334         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3335                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3336                 if (r) {
3337                         DRM_ERROR("Failed to add crtc irq id!\n");
3338                         return r;
3339                 }
3340
3341                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3342                 int_params.irq_source =
3343                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3344
3345                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3346
3347                 c_irq_params->adev = adev;
3348                 c_irq_params->irq_src = int_params.irq_source;
3349
3350                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3351                                 dm_crtc_high_irq, c_irq_params);
3352         }
3353
3354         /* Use GRPH_PFLIP interrupt */
3355         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3356                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3357                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3358                 if (r) {
3359                         DRM_ERROR("Failed to add page flip irq id!\n");
3360                         return r;
3361                 }
3362
3363                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3364                 int_params.irq_source =
3365                         dc_interrupt_to_irq_source(dc, i, 0);
3366
3367                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3368
3369                 c_irq_params->adev = adev;
3370                 c_irq_params->irq_src = int_params.irq_source;
3371
3372                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3373                                 dm_pflip_high_irq, c_irq_params);
3374
3375         }
3376
3377         /* HPD */
3378         r = amdgpu_irq_add_id(adev, client_id,
3379                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3380         if (r) {
3381                 DRM_ERROR("Failed to add hpd irq id!\n");
3382                 return r;
3383         }
3384
3385         register_hpd_handlers(adev);
3386
3387         return 0;
3388 }
3389 #endif
3390
3391 /* Register IRQ sources and initialize IRQ callbacks */
3392 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3393 {
3394         struct dc *dc = adev->dm.dc;
3395         struct common_irq_params *c_irq_params;
3396         struct dc_interrupt_params int_params = {0};
3397         int r;
3398         int i;
3399         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3400
3401         if (adev->family >= AMDGPU_FAMILY_AI)
3402                 client_id = SOC15_IH_CLIENTID_DCE;
3403
3404         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3405         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3406
3407         /*
3408          * Actions of amdgpu_irq_add_id():
3409          * 1. Register a set() function with base driver.
3410          *    Base driver will call set() function to enable/disable an
3411          *    interrupt in DC hardware.
3412          * 2. Register amdgpu_dm_irq_handler().
3413          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3414          *    coming from DC hardware.
3415          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3416          *    for acknowledging and handling. */
3417
3418         /* Use VBLANK interrupt */
3419         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3420                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3421                 if (r) {
3422                         DRM_ERROR("Failed to add crtc irq id!\n");
3423                         return r;
3424                 }
3425
3426                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3427                 int_params.irq_source =
3428                         dc_interrupt_to_irq_source(dc, i, 0);
3429
3430                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3431
3432                 c_irq_params->adev = adev;
3433                 c_irq_params->irq_src = int_params.irq_source;
3434
3435                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3436                                 dm_crtc_high_irq, c_irq_params);
3437         }
3438
3439         /* Use VUPDATE interrupt */
3440         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3441                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3442                 if (r) {
3443                         DRM_ERROR("Failed to add vupdate irq id!\n");
3444                         return r;
3445                 }
3446
3447                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3448                 int_params.irq_source =
3449                         dc_interrupt_to_irq_source(dc, i, 0);
3450
3451                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3452
3453                 c_irq_params->adev = adev;
3454                 c_irq_params->irq_src = int_params.irq_source;
3455
3456                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3457                                 dm_vupdate_high_irq, c_irq_params);
3458         }
3459
3460         /* Use GRPH_PFLIP interrupt */
3461         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3462                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3463                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3464                 if (r) {
3465                         DRM_ERROR("Failed to add page flip irq id!\n");
3466                         return r;
3467                 }
3468
3469                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3470                 int_params.irq_source =
3471                         dc_interrupt_to_irq_source(dc, i, 0);
3472
3473                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3474
3475                 c_irq_params->adev = adev;
3476                 c_irq_params->irq_src = int_params.irq_source;
3477
3478                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3479                                 dm_pflip_high_irq, c_irq_params);
3480
3481         }
3482
3483         /* HPD */
3484         r = amdgpu_irq_add_id(adev, client_id,
3485                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3486         if (r) {
3487                 DRM_ERROR("Failed to add hpd irq id!\n");
3488                 return r;
3489         }
3490
3491         register_hpd_handlers(adev);
3492
3493         return 0;
3494 }
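/*
 * Note on the indexing used by the handlers above: the code relies on
 * dc_interrupt_to_irq_source() mapping the per-display src ids onto the
 * contiguous DC_IRQ_SOURCE_* ranges, so subtracting the range base (for
 * example DC_IRQ_SOURCE_VBLANK1 or DC_IRQ_SOURCE_VUPDATE1) yields a
 * zero-based index into the matching params array (vblank_params[],
 * vupdate_params[], pflip_params[]).
 */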
3495
3496 #if defined(CONFIG_DRM_AMD_DC_DCN)
3497 /* Register IRQ sources and initialize IRQ callbacks */
3498 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3499 {
3500         struct dc *dc = adev->dm.dc;
3501         struct common_irq_params *c_irq_params;
3502         struct dc_interrupt_params int_params = {0};
3503         int r;
3504         int i;
3505 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3506         static const unsigned int vrtl_int_srcid[] = {
3507                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3508                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3509                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3510                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3511                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3512                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3513         };
3514 #endif
3515
3516         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3517         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3518
3519         /*
3520          * Actions of amdgpu_irq_add_id():
3521          * 1. Register a set() function with base driver.
3522          *    Base driver will call set() function to enable/disable an
3523          *    interrupt in DC hardware.
3524          * 2. Register amdgpu_dm_irq_handler().
3525          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3526          *    coming from DC hardware.
3527          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3528          *    for acknowledging and handling.
3529          */
3530
3531         /* Use VSTARTUP interrupt */
3532         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3533                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3534                         i++) {
3535                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3536
3537                 if (r) {
3538                         DRM_ERROR("Failed to add crtc irq id!\n");
3539                         return r;
3540                 }
3541
3542                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3543                 int_params.irq_source =
3544                         dc_interrupt_to_irq_source(dc, i, 0);
3545
3546                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3547
3548                 c_irq_params->adev = adev;
3549                 c_irq_params->irq_src = int_params.irq_source;
3550
3551                 amdgpu_dm_irq_register_interrupt(
3552                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3553         }
3554
3555         /* Use otg vertical line interrupt */
3556 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3557         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3558                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3559                                 vrtl_int_srcid[i], &adev->vline0_irq);
3560
3561                 if (r) {
3562                         DRM_ERROR("Failed to add vline0 irq id!\n");
3563                         return r;
3564                 }
3565
3566                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3567                 int_params.irq_source =
3568                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3569
3570                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3571                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3572                         break;
3573                 }
3574
3575                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3576                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3577
3578                 c_irq_params->adev = adev;
3579                 c_irq_params->irq_src = int_params.irq_source;
3580
3581                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3582                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3583         }
3584 #endif
3585
3586         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3587          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3588          * to trigger at end of each vblank, regardless of state of the lock,
3589          * matching DCE behaviour.
3590          */
3591         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3592              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3593              i++) {
3594                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3595
3596                 if (r) {
3597                         DRM_ERROR("Failed to add vupdate irq id!\n");
3598                         return r;
3599                 }
3600
3601                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3602                 int_params.irq_source =
3603                         dc_interrupt_to_irq_source(dc, i, 0);
3604
3605                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3606
3607                 c_irq_params->adev = adev;
3608                 c_irq_params->irq_src = int_params.irq_source;
3609
3610                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3611                                 dm_vupdate_high_irq, c_irq_params);
3612         }
3613
3614         /* Use GRPH_PFLIP interrupt */
3615         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3616                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3617                         i++) {
3618                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3619                 if (r) {
3620                         DRM_ERROR("Failed to add page flip irq id!\n");
3621                         return r;
3622                 }
3623
3624                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3625                 int_params.irq_source =
3626                         dc_interrupt_to_irq_source(dc, i, 0);
3627
3628                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3629
3630                 c_irq_params->adev = adev;
3631                 c_irq_params->irq_src = int_params.irq_source;
3632
3633                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3634                                 dm_pflip_high_irq, c_irq_params);
3635
3636         }
3637
3638         /* HPD */
3639         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3640                         &adev->hpd_irq);
3641         if (r) {
3642                 DRM_ERROR("Failed to add hpd irq id!\n");
3643                 return r;
3644         }
3645
3646         register_hpd_handlers(adev);
3647
3648         return 0;
3649 }
3650 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3651 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3652 {
3653         struct dc *dc = adev->dm.dc;
3654         struct common_irq_params *c_irq_params;
3655         struct dc_interrupt_params int_params = {0};
3656         int r, i;
3657
3658         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3659         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3660
3661         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3662                         &adev->dmub_outbox_irq);
3663         if (r) {
3664                 DRM_ERROR("Failed to add outbox irq id!\n");
3665                 return r;
3666         }
3667
3668         if (dc->ctx->dmub_srv) {
3669                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3670                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3671                 int_params.irq_source =
3672                 dc_interrupt_to_irq_source(dc, i, 0);
3673
3674                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3675
3676                 c_irq_params->adev = adev;
3677                 c_irq_params->irq_src = int_params.irq_source;
3678
3679                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3680                                 dm_dmub_outbox1_low_irq, c_irq_params);
3681         }
3682
3683         return 0;
3684 }
3685 #endif
3686
3687 /*
3688  * Acquires the lock for the atomic state object and returns
3689  * the new atomic state.
3690  *
3691  * This should only be called during atomic check.
3692  */
3693 static int dm_atomic_get_state(struct drm_atomic_state *state,
3694                                struct dm_atomic_state **dm_state)
3695 {
3696         struct drm_device *dev = state->dev;
3697         struct amdgpu_device *adev = drm_to_adev(dev);
3698         struct amdgpu_display_manager *dm = &adev->dm;
3699         struct drm_private_state *priv_state;
3700
3701         if (*dm_state)
3702                 return 0;
3703
3704         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3705         if (IS_ERR(priv_state))
3706                 return PTR_ERR(priv_state);
3707
3708         *dm_state = to_dm_atomic_state(priv_state);
3709
3710         return 0;
3711 }
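/*
 * Typical usage during atomic check, assuming a caller-local pointer that
 * starts out NULL (a sketch, not a verbatim call site from this file):
 *
 *      struct dm_atomic_state *dm_state = NULL;
 *      int ret = dm_atomic_get_state(state, &dm_state);
 *
 *      if (ret)
 *              return ret;
 *
 * The first call takes the private object's modeset lock and fills in
 * *dm_state; later calls with the now non-NULL pointer return immediately.
 */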
3712
3713 static struct dm_atomic_state *
3714 dm_atomic_get_new_state(struct drm_atomic_state *state)
3715 {
3716         struct drm_device *dev = state->dev;
3717         struct amdgpu_device *adev = drm_to_adev(dev);
3718         struct amdgpu_display_manager *dm = &adev->dm;
3719         struct drm_private_obj *obj;
3720         struct drm_private_state *new_obj_state;
3721         int i;
3722
3723         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3724                 if (obj->funcs == dm->atomic_obj.funcs)
3725                         return to_dm_atomic_state(new_obj_state);
3726         }
3727
3728         return NULL;
3729 }
3730
3731 static struct drm_private_state *
3732 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3733 {
3734         struct dm_atomic_state *old_state, *new_state;
3735
3736         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3737         if (!new_state)
3738                 return NULL;
3739
3740         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3741
3742         old_state = to_dm_atomic_state(obj->state);
3743
3744         if (old_state && old_state->context)
3745                 new_state->context = dc_copy_state(old_state->context);
3746
3747         if (!new_state->context) {
3748                 kfree(new_state);
3749                 return NULL;
3750         }
3751
3752         return &new_state->base;
3753 }
3754
3755 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3756                                     struct drm_private_state *state)
3757 {
3758         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3759
3760         if (dm_state && dm_state->context)
3761                 dc_release_state(dm_state->context);
3762
3763         kfree(dm_state);
3764 }
3765
3766 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3767         .atomic_duplicate_state = dm_atomic_duplicate_state,
3768         .atomic_destroy_state = dm_atomic_destroy_state,
3769 };
3770
3771 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3772 {
3773         struct dm_atomic_state *state;
3774         int r;
3775
3776         adev->mode_info.mode_config_initialized = true;
3777
3778         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3779         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3780
3781         adev_to_drm(adev)->mode_config.max_width = 16384;
3782         adev_to_drm(adev)->mode_config.max_height = 16384;
3783
3784         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3785         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3786         /* indicates support for immediate flip */
3787         adev_to_drm(adev)->mode_config.async_page_flip = true;
3788
3789         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3790
3791         state = kzalloc(sizeof(*state), GFP_KERNEL);
3792         if (!state)
3793                 return -ENOMEM;
3794
3795         state->context = dc_create_state(adev->dm.dc);
3796         if (!state->context) {
3797                 kfree(state);
3798                 return -ENOMEM;
3799         }
3800
3801         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3802
3803         drm_atomic_private_obj_init(adev_to_drm(adev),
3804                                     &adev->dm.atomic_obj,
3805                                     &state->base,
3806                                     &dm_atomic_state_funcs);
3807
3808         r = amdgpu_display_modeset_create_props(adev);
3809         if (r) {
3810                 dc_release_state(state->context);
3811                 kfree(state);
3812                 return r;
3813         }
3814
3815         r = amdgpu_dm_audio_init(adev);
3816         if (r) {
3817                 dc_release_state(state->context);
3818                 kfree(state);
3819                 return r;
3820         }
3821
3822         return 0;
3823 }
3824
3825 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3826 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3827 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3828
3829 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3830         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3831
3832 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3833                                             int bl_idx)
3834 {
3835 #if defined(CONFIG_ACPI)
3836         struct amdgpu_dm_backlight_caps caps;
3837
3838         memset(&caps, 0, sizeof(caps));
3839
3840         if (dm->backlight_caps[bl_idx].caps_valid)
3841                 return;
3842
3843         amdgpu_acpi_get_backlight_caps(&caps);
3844         if (caps.caps_valid) {
3845                 dm->backlight_caps[bl_idx].caps_valid = true;
3846                 if (caps.aux_support)
3847                         return;
3848                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3849                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3850         } else {
3851                 dm->backlight_caps[bl_idx].min_input_signal =
3852                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3853                 dm->backlight_caps[bl_idx].max_input_signal =
3854                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3855         }
3856 #else
3857         if (dm->backlight_caps[bl_idx].aux_support)
3858                 return;
3859
3860         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3861         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3862 #endif
3863 }
3864
3865 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3866                                 unsigned *min, unsigned *max)
3867 {
3868         if (!caps)
3869                 return 0;
3870
3871         if (caps->aux_support) {
3872                 // Firmware limits are in nits, DC API wants millinits.
3873                 *max = 1000 * caps->aux_max_input_signal;
3874                 *min = 1000 * caps->aux_min_input_signal;
3875         } else {
3876                 // Firmware limits are 8-bit, PWM control is 16-bit.
3877                 *max = 0x101 * caps->max_input_signal;
3878                 *min = 0x101 * caps->min_input_signal;
3879         }
3880         return 1;
3881 }
3882
3883 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3884                                         uint32_t brightness)
3885 {
3886         unsigned min, max;
3887
3888         if (!get_brightness_range(caps, &min, &max))
3889                 return brightness;
3890
3891         // Rescale 0..255 to min..max
3892         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3893                                        AMDGPU_MAX_BL_LEVEL);
3894 }
3895
3896 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3897                                       uint32_t brightness)
3898 {
3899         unsigned min, max;
3900
3901         if (!get_brightness_range(caps, &min, &max))
3902                 return brightness;
3903
3904         if (brightness < min)
3905                 return 0;
3906         // Rescale min..max to 0..255
3907         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3908                                  max - min);
3909 }
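/*
 * Worked example with assumed PWM-path caps (aux_support = false,
 * min_input_signal = 12, max_input_signal = 255): the range becomes
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so
 * convert_brightness_from_user() maps a user brightness of 128 to
 * 3084 + round(62451 * 128 / 255) = 34432, and
 * convert_brightness_to_user() maps 34432 back to 128.
 */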
3910
3911 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3912                                          int bl_idx,
3913                                          u32 user_brightness)
3914 {
3915         struct amdgpu_dm_backlight_caps caps;
3916         struct dc_link *link;
3917         u32 brightness;
3918         bool rc;
3919
3920         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3921         caps = dm->backlight_caps[bl_idx];
3922
3923         dm->brightness[bl_idx] = user_brightness;
3924         /* update scratch register */
3925         if (bl_idx == 0)
3926                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3927         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3928         link = (struct dc_link *)dm->backlight_link[bl_idx];
3929
3930         /* Change brightness based on AUX property */
3931         if (caps.aux_support) {
3932                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3933                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3934                 if (!rc)
3935                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3936         } else {
3937                 rc = dc_link_set_backlight_level(link, brightness, 0);
3938                 if (!rc)
3939                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3940         }
3941
3942         return rc ? 0 : 1;
3943 }
3944
3945 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3946 {
3947         struct amdgpu_display_manager *dm = bl_get_data(bd);
3948         int i;
3949
3950         for (i = 0; i < dm->num_of_edps; i++) {
3951                 if (bd == dm->backlight_dev[i])
3952                         break;
3953         }
3954         if (i >= AMDGPU_DM_MAX_NUM_EDP)
3955                 i = 0;
3956         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3957
3958         return 0;
3959 }
3960
3961 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3962                                          int bl_idx)
3963 {
3964         struct amdgpu_dm_backlight_caps caps;
3965         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3966
3967         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3968         caps = dm->backlight_caps[bl_idx];
3969
3970         if (caps.aux_support) {
3971                 u32 avg, peak;
3972                 bool rc;
3973
3974                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3975                 if (!rc)
3976                         return dm->brightness[bl_idx];
3977                 return convert_brightness_to_user(&caps, avg);
3978         } else {
3979                 int ret = dc_link_get_backlight_level(link);
3980
3981                 if (ret == DC_ERROR_UNEXPECTED)
3982                         return dm->brightness[bl_idx];
3983                 return convert_brightness_to_user(&caps, ret);
3984         }
3985 }
3986
3987 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3988 {
3989         struct amdgpu_display_manager *dm = bl_get_data(bd);
3990         int i;
3991
3992         for (i = 0; i < dm->num_of_edps; i++) {
3993                 if (bd == dm->backlight_dev[i])
3994                         break;
3995         }
3996         if (i >= AMDGPU_DM_MAX_NUM_EDP)
3997                 i = 0;
3998         return amdgpu_dm_backlight_get_level(dm, i);
3999 }
4000
4001 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4002         .options = BL_CORE_SUSPENDRESUME,
4003         .get_brightness = amdgpu_dm_backlight_get_brightness,
4004         .update_status  = amdgpu_dm_backlight_update_status,
4005 };
4006
4007 static void
4008 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4009 {
4010         char bl_name[16];
4011         struct backlight_properties props = { 0 };
4012
4013         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4014         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4015
4016         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4017         props.brightness = AMDGPU_MAX_BL_LEVEL;
4018         props.type = BACKLIGHT_RAW;
4019
4020         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4021                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4022
4023         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4024                                                                        adev_to_drm(dm->adev)->dev,
4025                                                                        dm,
4026                                                                        &amdgpu_dm_backlight_ops,
4027                                                                        &props);
4028
4029         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4030                 DRM_ERROR("DM: Backlight registration failed!\n");
4031         else
4032                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4033 }
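/*
 * Assuming the first eDP panel on a card whose primary DRM minor index is 0,
 * the device registered above appears as /sys/class/backlight/amdgpu_bl0,
 * accepting brightness values in the 0..AMDGPU_MAX_BL_LEVEL range declared
 * in props.
 */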
4034 #endif
4035
4036 static int initialize_plane(struct amdgpu_display_manager *dm,
4037                             struct amdgpu_mode_info *mode_info, int plane_id,
4038                             enum drm_plane_type plane_type,
4039                             const struct dc_plane_cap *plane_cap)
4040 {
4041         struct drm_plane *plane;
4042         unsigned long possible_crtcs;
4043         int ret = 0;
4044
4045         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4046         if (!plane) {
4047                 DRM_ERROR("KMS: Failed to allocate plane\n");
4048                 return -ENOMEM;
4049         }
4050         plane->type = plane_type;
4051
4052         /*
4053          * HACK: IGT tests expect that the primary plane for a CRTC
4054          * reports exactly one possible CRTC. Only expose support for
4055          * all CRTCs on planes that will not be used as a primary
4056          * plane for a CRTC, such as overlay or underlay planes.
4057          */
4058         possible_crtcs = 1 << plane_id;
4059         if (plane_id >= dm->dc->caps.max_streams)
4060                 possible_crtcs = 0xff;
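        /*
         * Example, assuming an ASIC with max_streams = 4: plane_ids 0..3 get
         * possible_crtcs of 0x1, 0x2, 0x4 and 0x8 respectively, while an
         * overlay plane with plane_id >= 4 advertises all CRTCs via 0xff.
         */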
4061
4062         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4063
4064         if (ret) {
4065                 DRM_ERROR("KMS: Failed to initialize plane\n");
4066                 kfree(plane);
4067                 return ret;
4068         }
4069
4070         if (mode_info)
4071                 mode_info->planes[plane_id] = plane;
4072
4073         return ret;
4074 }
4075
4076
4077 static void register_backlight_device(struct amdgpu_display_manager *dm,
4078                                       struct dc_link *link)
4079 {
4080 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4081         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4082
4083         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4084             link->type != dc_connection_none) {
4085                 /*
4086                  * Even if registration fails, we should continue with
4087                  * DM initialization, because not having backlight control
4088                  * is better than a black screen.
4089                  */
4090                 if (!dm->backlight_dev[dm->num_of_edps])
4091                         amdgpu_dm_register_backlight_device(dm);
4092
4093                 if (dm->backlight_dev[dm->num_of_edps]) {
4094                         dm->backlight_link[dm->num_of_edps] = link;
4095                         dm->num_of_edps++;
4096                 }
4097         }
4098 #endif
4099 }
4100
4101
4102 /*
4103  * In this architecture, the association
4104  * connector -> encoder -> crtc
4105  * is not really required. The crtc and connector will hold the
4106  * display_index as an abstraction to use with the DAL component.
4107  *
4108  * Returns 0 on success
4109  */
4110 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4111 {
4112         struct amdgpu_display_manager *dm = &adev->dm;
4113         int32_t i;
4114         struct amdgpu_dm_connector *aconnector = NULL;
4115         struct amdgpu_encoder *aencoder = NULL;
4116         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4117         uint32_t link_cnt;
4118         int32_t primary_planes;
4119         enum dc_connection_type new_connection_type = dc_connection_none;
4120         const struct dc_plane_cap *plane;
4121         bool psr_feature_enabled = false;
4122
4123         dm->display_indexes_num = dm->dc->caps.max_streams;
4124         /* Update the actually used number of crtcs */
4125         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4126
4127         link_cnt = dm->dc->caps.max_links;
4128         if (amdgpu_dm_mode_config_init(dm->adev)) {
4129                 DRM_ERROR("DM: Failed to initialize mode config\n");
4130                 return -EINVAL;
4131         }
4132
4133         /* There is one primary plane per CRTC */
4134         primary_planes = dm->dc->caps.max_streams;
4135         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4136
4137         /*
4138          * Initialize primary planes, implicit planes for legacy IOCTLs.
4139          * Order is reversed to match iteration order in atomic check.
4140          */
4141         for (i = (primary_planes - 1); i >= 0; i--) {
4142                 plane = &dm->dc->caps.planes[i];
4143
4144                 if (initialize_plane(dm, mode_info, i,
4145                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4146                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4147                         goto fail;
4148                 }
4149         }
4150
4151         /*
4152          * Initialize overlay planes, index starting after primary planes.
4153          * These planes have a higher DRM index than the primary planes since
4154          * they should be considered as having a higher z-order.
4155          * Order is reversed to match iteration order in atomic check.
4156          *
4157          * Only support DCN for now, and only expose one so we don't encourage
4158          * userspace to use up all the pipes.
4159          */
4160         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4161                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4162
4163                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4164                         continue;
4165
4166                 if (!plane->blends_with_above || !plane->blends_with_below)
4167                         continue;
4168
4169                 if (!plane->pixel_format_support.argb8888)
4170                         continue;
4171
4172                 if (initialize_plane(dm, NULL, primary_planes + i,
4173                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4174                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4175                         goto fail;
4176                 }
4177
4178                 /* Only create one overlay plane. */
4179                 break;
4180         }
4181
4182         for (i = 0; i < dm->dc->caps.max_streams; i++)
4183                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4184                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4185                         goto fail;
4186                 }
4187
4188 #if defined(CONFIG_DRM_AMD_DC_DCN)
4189         /* Use the DMUB outbox interrupt for notifications from the DMUB firmware */
4190         switch (adev->ip_versions[DCE_HWIP][0]) {
4191         case IP_VERSION(3, 0, 0):
4192         case IP_VERSION(3, 1, 2):
4193         case IP_VERSION(3, 1, 3):
4194         case IP_VERSION(2, 1, 0):
4195                 if (register_outbox_irq_handlers(dm->adev)) {
4196                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4197                         goto fail;
4198                 }
4199                 break;
4200         default:
4201                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4202                               adev->ip_versions[DCE_HWIP][0]);
4203         }
4204
4205         /* Determine whether to enable PSR support by default. */
4206         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4207                 switch (adev->ip_versions[DCE_HWIP][0]) {
4208                 case IP_VERSION(3, 1, 2):
4209                 case IP_VERSION(3, 1, 3):
4210                         psr_feature_enabled = true;
4211                         break;
4212                 default:
4213                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4214                         break;
4215                 }
4216         }
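        /*
         * Both knobs are module parameters: amdgpu.dcfeaturemask feeds
         * amdgpu_dc_feature_mask (so setting the DC_PSR_MASK bit opts
         * older DCN parts into PSR), while setting DC_DISABLE_PSR in
         * amdgpu.dcdebugmask wins over both paths and keeps PSR off.
         */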
4217 #endif
4218
4219         /* Loop over all connectors on the board */
4220         for (i = 0; i < link_cnt; i++) {
4221                 struct dc_link *link = NULL;
4222
4223                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4224                         DRM_ERROR(
4225                                 "KMS: Cannot support more than %d display indexes\n",
4226                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4227                         continue;
4228                 }
4229
4230                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4231                 if (!aconnector)
4232                         goto fail;
4233
4234                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4235                 if (!aencoder)
4236                         goto fail;
4237
4238                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4239                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4240                         goto fail;
4241                 }
4242
4243                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4244                         DRM_ERROR("KMS: Failed to initialize connector\n");
4245                         goto fail;
4246                 }
4247
4248                 link = dc_get_link_at_index(dm->dc, i);
4249
4250                 if (!dc_link_detect_sink(link, &new_connection_type))
4251                         DRM_ERROR("KMS: Failed to detect connector\n");
4252
4253                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4254                         emulated_link_detect(link);
4255                         amdgpu_dm_update_connector_after_detect(aconnector);
4256
4257                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4258                         amdgpu_dm_update_connector_after_detect(aconnector);
4259                         register_backlight_device(dm, link);
4260                         if (dm->num_of_edps)
4261                                 update_connector_ext_caps(aconnector);
4262                         if (psr_feature_enabled)
4263                                 amdgpu_dm_set_psr_caps(link);
4264                 }
4265
4267         }
4268
4269         /*
4270          * Disable vblank IRQs aggressively for power-saving.
4271          *
4272          * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4273          * is also supported.
4274          */
4275         adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4276
4277         /* Software is initialized. Now we can register interrupt handlers. */
4278         switch (adev->asic_type) {
4279 #if defined(CONFIG_DRM_AMD_DC_SI)
4280         case CHIP_TAHITI:
4281         case CHIP_PITCAIRN:
4282         case CHIP_VERDE:
4283         case CHIP_OLAND:
4284                 if (dce60_register_irq_handlers(dm->adev)) {
4285                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4286                         goto fail;
4287                 }
4288                 break;
4289 #endif
4290         case CHIP_BONAIRE:
4291         case CHIP_HAWAII:
4292         case CHIP_KAVERI:
4293         case CHIP_KABINI:
4294         case CHIP_MULLINS:
4295         case CHIP_TONGA:
4296         case CHIP_FIJI:
4297         case CHIP_CARRIZO:
4298         case CHIP_STONEY:
4299         case CHIP_POLARIS11:
4300         case CHIP_POLARIS10:
4301         case CHIP_POLARIS12:
4302         case CHIP_VEGAM:
4303         case CHIP_VEGA10:
4304         case CHIP_VEGA12:
4305         case CHIP_VEGA20:
4306                 if (dce110_register_irq_handlers(dm->adev)) {
4307                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4308                         goto fail;
4309                 }
4310                 break;
4311         default:
4312 #if defined(CONFIG_DRM_AMD_DC_DCN)
4313                 switch (adev->ip_versions[DCE_HWIP][0]) {
4314                 case IP_VERSION(1, 0, 0):
4315                 case IP_VERSION(1, 0, 1):
4316                 case IP_VERSION(2, 0, 2):
4317                 case IP_VERSION(2, 0, 3):
4318                 case IP_VERSION(2, 0, 0):
4319                 case IP_VERSION(2, 1, 0):
4320                 case IP_VERSION(3, 0, 0):
4321                 case IP_VERSION(3, 0, 2):
4322                 case IP_VERSION(3, 0, 3):
4323                 case IP_VERSION(3, 0, 1):
4324                 case IP_VERSION(3, 1, 2):
4325                 case IP_VERSION(3, 1, 3):
4326                         if (dcn10_register_irq_handlers(dm->adev)) {
4327                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4328                                 goto fail;
4329                         }
4330                         break;
4331                 default:
4332                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4333                                         adev->ip_versions[DCE_HWIP][0]);
4334                         goto fail;
4335                 }
4336 #endif
4337                 break;
4338         }
4339
4340         return 0;
4341 fail:
4342         kfree(aencoder);
4343         kfree(aconnector);
4344
4345         return -EINVAL;
4346 }
4347
4348 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4349 {
4350         drm_atomic_private_obj_fini(&dm->atomic_obj);
4352 }
4353
4354 /******************************************************************************
4355  * amdgpu_display_funcs functions
4356  *****************************************************************************/
4357
4358 /*
4359  * dm_bandwidth_update - program display watermarks
4360  *
4361  * @adev: amdgpu_device pointer
4362  *
4363  * Calculate and program the display watermarks and line buffer allocation.
4364  */
4365 static void dm_bandwidth_update(struct amdgpu_device *adev)
4366 {
4367         /* TODO: implement later */
4368 }
4369
4370 static const struct amdgpu_display_funcs dm_display_funcs = {
4371         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4372         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4373         .backlight_set_level = NULL, /* never called for DC */
4374         .backlight_get_level = NULL, /* never called for DC */
4375         .hpd_sense = NULL, /* called unconditionally */
4376         .hpd_set_polarity = NULL, /* called unconditionally */
4377         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4378         .page_flip_get_scanoutpos =
4379                 dm_crtc_get_scanoutpos, /* called unconditionally */
4380         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4381         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4382 };
4383
4384 #if defined(CONFIG_DEBUG_KERNEL_DC)
4385
4386 static ssize_t s3_debug_store(struct device *device,
4387                               struct device_attribute *attr,
4388                               const char *buf,
4389                               size_t count)
4390 {
4391         int ret;
4392         int s3_state;
4393         struct drm_device *drm_dev = dev_get_drvdata(device);
4394         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4395
4396         ret = kstrtoint(buf, 0, &s3_state);
4397
4398         if (ret == 0) {
4399                 if (s3_state) {
4400                         dm_resume(adev);
4401                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4402                 } else
4403                         dm_suspend(adev);
4404         }
4405
4406         return ret == 0 ? count : ret;
4407 }
4408
4409 DEVICE_ATTR_WO(s3_debug);
4410
4411 #endif
4412
4413 static int dm_early_init(void *handle)
4414 {
4415         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4416
4417         switch (adev->asic_type) {
4418 #if defined(CONFIG_DRM_AMD_DC_SI)
4419         case CHIP_TAHITI:
4420         case CHIP_PITCAIRN:
4421         case CHIP_VERDE:
4422                 adev->mode_info.num_crtc = 6;
4423                 adev->mode_info.num_hpd = 6;
4424                 adev->mode_info.num_dig = 6;
4425                 break;
4426         case CHIP_OLAND:
4427                 adev->mode_info.num_crtc = 2;
4428                 adev->mode_info.num_hpd = 2;
4429                 adev->mode_info.num_dig = 2;
4430                 break;
4431 #endif
4432         case CHIP_BONAIRE:
4433         case CHIP_HAWAII:
4434                 adev->mode_info.num_crtc = 6;
4435                 adev->mode_info.num_hpd = 6;
4436                 adev->mode_info.num_dig = 6;
4437                 break;
4438         case CHIP_KAVERI:
4439                 adev->mode_info.num_crtc = 4;
4440                 adev->mode_info.num_hpd = 6;
4441                 adev->mode_info.num_dig = 7;
4442                 break;
4443         case CHIP_KABINI:
4444         case CHIP_MULLINS:
4445                 adev->mode_info.num_crtc = 2;
4446                 adev->mode_info.num_hpd = 6;
4447                 adev->mode_info.num_dig = 6;
4448                 break;
4449         case CHIP_FIJI:
4450         case CHIP_TONGA:
4451                 adev->mode_info.num_crtc = 6;
4452                 adev->mode_info.num_hpd = 6;
4453                 adev->mode_info.num_dig = 7;
4454                 break;
4455         case CHIP_CARRIZO:
4456                 adev->mode_info.num_crtc = 3;
4457                 adev->mode_info.num_hpd = 6;
4458                 adev->mode_info.num_dig = 9;
4459                 break;
4460         case CHIP_STONEY:
4461                 adev->mode_info.num_crtc = 2;
4462                 adev->mode_info.num_hpd = 6;
4463                 adev->mode_info.num_dig = 9;
4464                 break;
4465         case CHIP_POLARIS11:
4466         case CHIP_POLARIS12:
4467                 adev->mode_info.num_crtc = 5;
4468                 adev->mode_info.num_hpd = 5;
4469                 adev->mode_info.num_dig = 5;
4470                 break;
4471         case CHIP_POLARIS10:
4472         case CHIP_VEGAM:
4473                 adev->mode_info.num_crtc = 6;
4474                 adev->mode_info.num_hpd = 6;
4475                 adev->mode_info.num_dig = 6;
4476                 break;
4477         case CHIP_VEGA10:
4478         case CHIP_VEGA12:
4479         case CHIP_VEGA20:
4480                 adev->mode_info.num_crtc = 6;
4481                 adev->mode_info.num_hpd = 6;
4482                 adev->mode_info.num_dig = 6;
4483                 break;
4484         default:
4485 #if defined(CONFIG_DRM_AMD_DC_DCN)
4486                 switch (adev->ip_versions[DCE_HWIP][0]) {
4487                 case IP_VERSION(2, 0, 2):
4488                 case IP_VERSION(3, 0, 0):
4489                         adev->mode_info.num_crtc = 6;
4490                         adev->mode_info.num_hpd = 6;
4491                         adev->mode_info.num_dig = 6;
4492                         break;
4493                 case IP_VERSION(2, 0, 0):
4494                 case IP_VERSION(3, 0, 2):
4495                         adev->mode_info.num_crtc = 5;
4496                         adev->mode_info.num_hpd = 5;
4497                         adev->mode_info.num_dig = 5;
4498                         break;
4499                 case IP_VERSION(2, 0, 3):
4500                 case IP_VERSION(3, 0, 3):
4501                         adev->mode_info.num_crtc = 2;
4502                         adev->mode_info.num_hpd = 2;
4503                         adev->mode_info.num_dig = 2;
4504                         break;
4505                 case IP_VERSION(1, 0, 0):
4506                 case IP_VERSION(1, 0, 1):
4507                 case IP_VERSION(3, 0, 1):
4508                 case IP_VERSION(2, 1, 0):
4509                 case IP_VERSION(3, 1, 2):
4510                 case IP_VERSION(3, 1, 3):
4511                         adev->mode_info.num_crtc = 4;
4512                         adev->mode_info.num_hpd = 4;
4513                         adev->mode_info.num_dig = 4;
4514                         break;
4515                 default:
4516                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4517                                         adev->ip_versions[DCE_HWIP][0]);
4518                         return -EINVAL;
4519                 }
4520 #endif
4521                 break;
4522         }
4523
4524         amdgpu_dm_set_irq_funcs(adev);
4525
4526         if (adev->mode_info.funcs == NULL)
4527                 adev->mode_info.funcs = &dm_display_funcs;
4528
4529         /*
4530          * Note: Do NOT change adev->audio_endpt_rreg and
4531          * adev->audio_endpt_wreg because they are initialised in
4532          * amdgpu_device_init()
4533          */
4534 #if defined(CONFIG_DEBUG_KERNEL_DC)
4535         device_create_file(
4536                 adev_to_drm(adev)->dev,
4537                 &dev_attr_s3_debug);
4538 #endif
4539
4540         return 0;
4541 }
4542
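/*
 * modeset_required() and modereset_required() split
 * drm_atomic_crtc_needs_modeset() - true when the mode, the connector
 * set or the active flag changed - by whether the CRTC ends up active
 * (full modeset) or inactive (stream teardown) after the commit.
 */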
4543 static bool modeset_required(struct drm_crtc_state *crtc_state,
4544                              struct dc_stream_state *new_stream,
4545                              struct dc_stream_state *old_stream)
4546 {
4547         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4548 }
4549
4550 static bool modereset_required(struct drm_crtc_state *crtc_state)
4551 {
4552         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4553 }
4554
4555 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4556 {
4557         drm_encoder_cleanup(encoder);
4558         kfree(encoder);
4559 }
4560
4561 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4562         .destroy = amdgpu_dm_encoder_destroy,
4563 };
4564
4566 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4567                                          struct drm_framebuffer *fb,
4568                                          int *min_downscale, int *max_upscale)
4569 {
4570         struct amdgpu_device *adev = drm_to_adev(dev);
4571         struct dc *dc = adev->dm.dc;
4572         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4573         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4574
4575         switch (fb->format->format) {
4576         case DRM_FORMAT_P010:
4577         case DRM_FORMAT_NV12:
4578         case DRM_FORMAT_NV21:
4579                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4580                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4581                 break;
4582
4583         case DRM_FORMAT_XRGB16161616F:
4584         case DRM_FORMAT_ARGB16161616F:
4585         case DRM_FORMAT_XBGR16161616F:
4586         case DRM_FORMAT_ABGR16161616F:
4587                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4588                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4589                 break;
4590
4591         default:
4592                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4593                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4594                 break;
4595         }
4596
4597         /*
4598          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4599          * scaling factor of 1.0 == 1000 units.
4600          */
4601         if (*max_upscale == 1)
4602                 *max_upscale = 1000;
4603
4604         if (*min_downscale == 1)
4605                 *min_downscale = 1000;
4606 }
4607
4609 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4610                                 const struct drm_plane_state *state,
4611                                 struct dc_scaling_info *scaling_info)
4612 {
4613         int scale_w, scale_h, min_downscale, max_upscale;
4614
4615         memset(scaling_info, 0, sizeof(*scaling_info));
4616
4617         /* Source is fixed-point 16.16, but we ignore the fractional part for now... */
4618         scaling_info->src_rect.x = state->src_x >> 16;
4619         scaling_info->src_rect.y = state->src_y >> 16;
4620
4621          * For reasons we don't (yet) fully understand, a non-zero
4622          * For reasons we don't (yet) fully understand a non-zero
4623          * src_y coordinate into an NV12 buffer can cause a
4624          * system hang on DCN1x.
4625          * To avoid hangs (and maybe be overly cautious)
4626          * let's reject both non-zero src_x and src_y.
4627          *
4628          * We currently know of only one use-case to reproduce a
4629          * scenario with non-zero src_x and src_y for NV12, which
4630          * is to gesture the YouTube Android app into full screen
4631          * on ChromeOS.
4632          */
4633         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4634             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4635             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4636             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4637                 return -EINVAL;
4638
4639         scaling_info->src_rect.width = state->src_w >> 16;
4640         if (scaling_info->src_rect.width == 0)
4641                 return -EINVAL;
4642
4643         scaling_info->src_rect.height = state->src_h >> 16;
4644         if (scaling_info->src_rect.height == 0)
4645                 return -EINVAL;
4646
4647         scaling_info->dst_rect.x = state->crtc_x;
4648         scaling_info->dst_rect.y = state->crtc_y;
4649
4650         if (state->crtc_w == 0)
4651                 return -EINVAL;
4652
4653         scaling_info->dst_rect.width = state->crtc_w;
4654
4655         if (state->crtc_h == 0)
4656                 return -EINVAL;
4657
4658         scaling_info->dst_rect.height = state->crtc_h;
4659
4660         /* DRM doesn't specify clipping on destination output. */
4661         scaling_info->clip_rect = scaling_info->dst_rect;
4662
4663         /* Validate scaling per-format with DC plane caps */
4664         if (state->plane && state->plane->dev && state->fb) {
4665                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4666                                              &min_downscale, &max_upscale);
4667         } else {
4668                 min_downscale = 250;
4669                 max_upscale = 16000;
4670         }
4671
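        /*
         * Scaling ratios are in units of 1/1000, so the fallback caps
         * above allow from 1/4 downscale (250) up to 16x upscale (16000).
         * For example, a 1920-wide source on a 960-wide destination gives
         * scale_w = 960 * 1000 / 1920 = 500, i.e. a 2:1 downscale, which
         * is accepted only while min_downscale <= 500.
         */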
4672         scale_w = scaling_info->dst_rect.width * 1000 /
4673                   scaling_info->src_rect.width;
4674
4675         if (scale_w < min_downscale || scale_w > max_upscale)
4676                 return -EINVAL;
4677
4678         scale_h = scaling_info->dst_rect.height * 1000 /
4679                   scaling_info->src_rect.height;
4680
4681         if (scale_h < min_downscale || scale_h > max_upscale)
4682                 return -EINVAL;
4683
4684         /*
4685          * The "scaling_quality" can be ignored for now; with quality = 0,
4686          * DC assumes reasonable defaults based on the format.
4687          */
4688
4689         return 0;
4690 }
4691
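/*
 * AMDGPU_TILING_GET() (include/uapi/drm/amdgpu_drm.h) extracts one
 * bitfield from the 64-bit tiling-flags word, effectively
 * (flags >> AMDGPU_TILING_<FIELD>_SHIFT) & AMDGPU_TILING_<FIELD>_MASK,
 * so the GFX8 path below just unpacks the pre-GFX9 bank/tile geometry
 * that was packed into the BO metadata.
 */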
4692 static void
4693 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4694                                  uint64_t tiling_flags)
4695 {
4696         /* Fill GFX8 params */
4697         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4698                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4699
4700                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4701                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4702                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4703                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4704                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4705
4706                 /* XXX fix me for VI */
4707                 tiling_info->gfx8.num_banks = num_banks;
4708                 tiling_info->gfx8.array_mode =
4709                                 DC_ARRAY_2D_TILED_THIN1;
4710                 tiling_info->gfx8.tile_split = tile_split;
4711                 tiling_info->gfx8.bank_width = bankw;
4712                 tiling_info->gfx8.bank_height = bankh;
4713                 tiling_info->gfx8.tile_aspect = mtaspect;
4714                 tiling_info->gfx8.tile_mode =
4715                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4716         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4717                         == DC_ARRAY_1D_TILED_THIN1) {
4718                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4719         }
4720
4721         tiling_info->gfx8.pipe_config =
4722                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4723 }
4724
4725 static void
4726 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4727                                   union dc_tiling_info *tiling_info)
4728 {
4729         tiling_info->gfx9.num_pipes =
4730                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4731         tiling_info->gfx9.num_banks =
4732                 adev->gfx.config.gb_addr_config_fields.num_banks;
4733         tiling_info->gfx9.pipe_interleave =
4734                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4735         tiling_info->gfx9.num_shader_engines =
4736                 adev->gfx.config.gb_addr_config_fields.num_se;
4737         tiling_info->gfx9.max_compressed_frags =
4738                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4739         tiling_info->gfx9.num_rb_per_se =
4740                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4741         tiling_info->gfx9.shaderEnable = 1;
4742         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4743                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4744 }
4745
4746 static int
4747 validate_dcc(struct amdgpu_device *adev,
4748              const enum surface_pixel_format format,
4749              const enum dc_rotation_angle rotation,
4750              const union dc_tiling_info *tiling_info,
4751              const struct dc_plane_dcc_param *dcc,
4752              const struct dc_plane_address *address,
4753              const struct plane_size *plane_size)
4754 {
4755         struct dc *dc = adev->dm.dc;
4756         struct dc_dcc_surface_param input;
4757         struct dc_surface_dcc_cap output;
4758
4759         memset(&input, 0, sizeof(input));
4760         memset(&output, 0, sizeof(output));
4761
4762         if (!dcc->enable)
4763                 return 0;
4764
4765         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4766             !dc->cap_funcs.get_dcc_compression_cap)
4767                 return -EINVAL;
4768
4769         input.format = format;
4770         input.surface_size.width = plane_size->surface_size.width;
4771         input.surface_size.height = plane_size->surface_size.height;
4772         input.swizzle_mode = tiling_info->gfx9.swizzle;
4773
4774         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4775                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4776         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4777                 input.scan = SCAN_DIRECTION_VERTICAL;
4778
4779         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4780                 return -EINVAL;
4781
4782         if (!output.capable)
4783                 return -EINVAL;
4784
4785         if (dcc->independent_64b_blks == 0 &&
4786             output.grph.rgb.independent_64b_blks != 0)
4787                 return -EINVAL;
4788
4789         return 0;
4790 }
4791
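/*
 * AMD format modifiers are bitfields packed into a u64 (see
 * include/uapi/drm/drm_fourcc.h): AMD_FMT_MOD_GET(DCC, modifier) is
 * effectively (modifier >> AMD_FMT_MOD_DCC_SHIFT) & AMD_FMT_MOD_DCC_MASK,
 * and IS_AMD_FMT_MOD() checks the vendor bits first so that foreign
 * modifiers are never misparsed.
 */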
4792 static bool
4793 modifier_has_dcc(uint64_t modifier)
4794 {
4795         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4796 }
4797
4798 static unsigned
4799 modifier_gfx9_swizzle_mode(uint64_t modifier)
4800 {
4801         if (modifier == DRM_FORMAT_MOD_LINEAR)
4802                 return 0;
4803
4804         return AMD_FMT_MOD_GET(TILE, modifier);
4805 }
4806
4807 static const struct drm_format_info *
4808 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4809 {
4810         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4811 }
4812
4813 static void
4814 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4815                                     union dc_tiling_info *tiling_info,
4816                                     uint64_t modifier)
4817 {
4818         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4819         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4820         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4821         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4822
4823         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4824
4825         if (!IS_AMD_FMT_MOD(modifier))
4826                 return;
4827
4828         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4829         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4830
4831         if (adev->family >= AMDGPU_FAMILY_NV) {
4832                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4833         } else {
4834                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4835
4836                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4837         }
4838 }
4839
4840 enum dm_micro_swizzle {
4841         MICRO_SWIZZLE_Z = 0,
4842         MICRO_SWIZZLE_S = 1,
4843         MICRO_SWIZZLE_D = 2,
4844         MICRO_SWIZZLE_R = 3
4845 };
4846
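/*
 * GFX9+ swizzle modes encode the micro-tile ordering in their low two
 * bits, so masking the TILE field with 3 below recovers one of the
 * dm_micro_swizzle values, e.g. any _D (display) mode yields
 * MICRO_SWIZZLE_D == 2.
 */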
4847 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4848                                           uint32_t format,
4849                                           uint64_t modifier)
4850 {
4851         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4852         const struct drm_format_info *info = drm_format_info(format);
4853         int i;
4854
4855         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4856
4857         if (!info)
4858                 return false;
4859
4860         /*
4861          * We always have to allow these modifiers:
4862          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4863          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4864          */
4865         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4866             modifier == DRM_FORMAT_MOD_INVALID) {
4867                 return true;
4868         }
4869
4870         /* Check that the modifier is on the list of the plane's supported modifiers. */
4871         for (i = 0; i < plane->modifier_count; i++) {
4872                 if (modifier == plane->modifiers[i])
4873                         break;
4874         }
4875         if (i == plane->modifier_count)
4876                 return false;
4877
4878         /*
4879          * For D swizzle the canonical modifier depends on the bpp, so check
4880          * it here.
4881          */
4882         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4883             adev->family >= AMDGPU_FAMILY_NV) {
4884                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4885                         return false;
4886         }
4887
4888         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4889             info->cpp[0] < 8)
4890                 return false;
4891
4892         if (modifier_has_dcc(modifier)) {
4893                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4894                 if (info->cpp[0] != 4)
4895                         return false;
4896                 /* We support multi-planar formats, but not when combined with
4897                  * additional DCC metadata planes. */
4898                 if (info->num_planes > 1)
4899                         return false;
4900         }
4901
4902         return true;
4903 }
4904
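/*
 * Append one modifier to a kmalloc'ed, capacity-doubling array
 * (amortized O(1) per append). On allocation failure the array is
 * freed and *mods set to NULL, so a single NULL check at the end of
 * get_plane_modifiers() catches -ENOMEM for the whole sequence.
 */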
4905 static void
4906 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4907 {
4908         if (!*mods)
4909                 return;
4910
4911         if (*cap - *size < 1) {
4912                 uint64_t new_cap = *cap * 2;
4913                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4914
4915                 if (!new_mods) {
4916                         kfree(*mods);
4917                         *mods = NULL;
4918                         return;
4919                 }
4920
4921                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4922                 kfree(*mods);
4923                 *mods = new_mods;
4924                 *cap = new_cap;
4925         }
4926
4927         (*mods)[*size] = mod;
4928         *size += 1;
4929 }
4930
4931 static void
4932 add_gfx9_modifiers(const struct amdgpu_device *adev,
4933                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4934 {
4935         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4936         int pipe_xor_bits = min(8, pipes +
4937                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4938         int bank_xor_bits = min(8 - pipe_xor_bits,
4939                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4940         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4941                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4942
4944         if (adev->family == AMDGPU_FAMILY_RV) {
4945                 /* Raven2 and later */
4946                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4947
4948                 /*
4949                  * No _D DCC swizzles yet because we only allow 32bpp, which
4950                  * doesn't support _D on DCN
4951                  */
4952
4953                 if (has_constant_encode) {
4954                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4955                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4956                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4957                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4958                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4959                                     AMD_FMT_MOD_SET(DCC, 1) |
4960                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4961                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4962                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4963                 }
4964
4965                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4966                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4967                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4968                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4969                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4970                             AMD_FMT_MOD_SET(DCC, 1) |
4971                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4972                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4973                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4974
4975                 if (has_constant_encode) {
4976                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4977                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4978                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4979                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4980                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4981                                     AMD_FMT_MOD_SET(DCC, 1) |
4982                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4983                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4984                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4986                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4987                                     AMD_FMT_MOD_SET(RB, rb) |
4988                                     AMD_FMT_MOD_SET(PIPE, pipes));
4989                 }
4990
4991                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4992                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4993                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4994                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4995                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4996                             AMD_FMT_MOD_SET(DCC, 1) |
4997                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4998                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4999                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5000                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5001                             AMD_FMT_MOD_SET(RB, rb) |
5002                             AMD_FMT_MOD_SET(PIPE, pipes));
5003         }
5004
5005         /*
5006          * Only supported for 64bpp on Raven, will be filtered on format in
5007          * dm_plane_format_mod_supported.
5008          */
5009         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5010                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5011                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5012                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5013                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5014
5015         if (adev->family == AMDGPU_FAMILY_RV) {
5016                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5017                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5018                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5019                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5020                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5021         }
5022
5023         /*
5024          * Only supported for 64bpp on Raven, will be filtered on format in
5025          * dm_plane_format_mod_supported.
5026          */
5027         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5028                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5029                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5030
5031         if (adev->family == AMDGPU_FAMILY_RV) {
5032                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5033                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5034                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5035         }
5036 }
5037
5038 static void
5039 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5040                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5041 {
5042         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5043
5044         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5045                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5046                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5047                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5048                     AMD_FMT_MOD_SET(DCC, 1) |
5049                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5050                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5051                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5052
5053         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5054                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5055                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5056                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5057                     AMD_FMT_MOD_SET(DCC, 1) |
5058                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5059                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5060                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5061                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5062
5063         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5064                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5065                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5066                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5067
5068         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5069                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5070                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5071                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5072
5074         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5075         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5076                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5077                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5078
5079         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5080                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5081                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5082 }
5083
5084 static void
5085 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5086                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5087 {
5088         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5089         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5090
5091         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5092                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5093                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5094                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5095                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5096                     AMD_FMT_MOD_SET(DCC, 1) |
5097                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5098                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5099                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5100                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5101
5102         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5103                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5104                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5105                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5106                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5107                     AMD_FMT_MOD_SET(DCC, 1) |
5108                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5109                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5110                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5111
5112         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5113                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5114                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5115                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5116                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5117                     AMD_FMT_MOD_SET(DCC, 1) |
5118                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5119                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5120                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5121                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5122                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5123
5124         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5126                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5127                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5128                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5129                     AMD_FMT_MOD_SET(DCC, 1) |
5130                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5131                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5132                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5133                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5134
5135         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5136                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5137                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5138                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5139                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5140
5141         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5142                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5143                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5144                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5145                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5146
5147         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5148         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5149                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5150                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5151
5152         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5153                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5154                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5155 }
5156
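/*
 * Build the plane's format-modifier list. Per GPU family the
 * DCC-capable modifiers are added first and the plain tiled ones
 * after, LINEAR is appended as a universal fallback, and
 * DRM_FORMAT_MOD_INVALID terminates the array; cursor planes are
 * limited to LINEAR.
 */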
5157 static int
5158 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5159 {
5160         uint64_t size = 0, capacity = 128;
5161         *mods = NULL;
5162
5163         /* We have not hooked up any pre-GFX9 modifiers. */
5164         if (adev->family < AMDGPU_FAMILY_AI)
5165                 return 0;
5166
5167         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5168
5169         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5170                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5171                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5172                 return *mods ? 0 : -ENOMEM;
5173         }
5174
5175         switch (adev->family) {
5176         case AMDGPU_FAMILY_AI:
5177         case AMDGPU_FAMILY_RV:
5178                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5179                 break;
5180         case AMDGPU_FAMILY_NV:
5181         case AMDGPU_FAMILY_VGH:
5182         case AMDGPU_FAMILY_YC:
5183                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5184                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5185                 else
5186                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5187                 break;
5188         }
5189
5190         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5191
5192         /* INVALID marks the end of the list. */
5193         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5194
5195         if (!*mods)
5196                 return -ENOMEM;
5197
5198         return 0;
5199 }
5200
5201 static int
5202 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5203                                           const struct amdgpu_framebuffer *afb,
5204                                           const enum surface_pixel_format format,
5205                                           const enum dc_rotation_angle rotation,
5206                                           const struct plane_size *plane_size,
5207                                           union dc_tiling_info *tiling_info,
5208                                           struct dc_plane_dcc_param *dcc,
5209                                           struct dc_plane_address *address,
5210                                           const bool force_disable_dcc)
5211 {
5212         const uint64_t modifier = afb->base.modifier;
5213         int ret = 0;
5214
5215         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5216         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5217
5218         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5219                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5220                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5221                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5222
5223                 dcc->enable = 1;
5224                 dcc->meta_pitch = afb->base.pitches[1];
5225                 dcc->independent_64b_blks = independent_64b_blks;
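                /*
                 * The modifier's independent-block bits map to the HUBP
                 * indirect-block setting as follows (pre-RBPLUS parts
                 * only honour the 64B bit):
                 *
                 *      64B 128B -> dcc_ind_blk
                 *       1   1     hubp_ind_block_64b_no_128bcl
                 *       0   1     hubp_ind_block_128b
                 *       1   0     hubp_ind_block_64b
                 *       0   0     hubp_ind_block_unconstrained
                 */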
5226                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5227                         if (independent_64b_blks && independent_128b_blks)
5228                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5229                         else if (independent_128b_blks)
5230                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5231                         else if (independent_64b_blks && !independent_128b_blks)
5232                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5233                         else
5234                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5235                 } else {
5236                         if (independent_64b_blks)
5237                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5238                         else
5239                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5240                 }
5241
5242                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5243                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5244         }
5245
5246         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5247         if (ret)
5248                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5249
5250         return ret;
5251 }
5252
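/*
 * Translate a DRM framebuffer into DC's plane size/tiling/DCC/address
 * description. For a 1920x1080 NV12 buffer, for instance, the luma
 * plane comes from offsets[0]/pitches[0] and the chroma plane from
 * offsets[1]/pitches[1], with the chroma size assumed to be half the
 * luma size in each dimension (4:2:0 subsampling, see the TODO below).
 */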
5253 static int
5254 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5255                              const struct amdgpu_framebuffer *afb,
5256                              const enum surface_pixel_format format,
5257                              const enum dc_rotation_angle rotation,
5258                              const uint64_t tiling_flags,
5259                              union dc_tiling_info *tiling_info,
5260                              struct plane_size *plane_size,
5261                              struct dc_plane_dcc_param *dcc,
5262                              struct dc_plane_address *address,
5263                              bool tmz_surface,
5264                              bool force_disable_dcc)
5265 {
5266         const struct drm_framebuffer *fb = &afb->base;
5267         int ret;
5268
5269         memset(tiling_info, 0, sizeof(*tiling_info));
5270         memset(plane_size, 0, sizeof(*plane_size));
5271         memset(dcc, 0, sizeof(*dcc));
5272         memset(address, 0, sizeof(*address));
5273
5274         address->tmz_surface = tmz_surface;
5275
5276         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5277                 uint64_t addr = afb->address + fb->offsets[0];
5278
5279                 plane_size->surface_size.x = 0;
5280                 plane_size->surface_size.y = 0;
5281                 plane_size->surface_size.width = fb->width;
5282                 plane_size->surface_size.height = fb->height;
5283                 plane_size->surface_pitch =
5284                         fb->pitches[0] / fb->format->cpp[0];
5285
5286                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5287                 address->grph.addr.low_part = lower_32_bits(addr);
5288                 address->grph.addr.high_part = upper_32_bits(addr);
5289         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5290                 uint64_t luma_addr = afb->address + fb->offsets[0];
5291                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5292
5293                 plane_size->surface_size.x = 0;
5294                 plane_size->surface_size.y = 0;
5295                 plane_size->surface_size.width = fb->width;
5296                 plane_size->surface_size.height = fb->height;
5297                 plane_size->surface_pitch =
5298                         fb->pitches[0] / fb->format->cpp[0];
5299
5300                 plane_size->chroma_size.x = 0;
5301                 plane_size->chroma_size.y = 0;
5302                 /* TODO: set these based on surface format */
5303                 plane_size->chroma_size.width = fb->width / 2;
5304                 plane_size->chroma_size.height = fb->height / 2;
5305
5306                 plane_size->chroma_pitch =
5307                         fb->pitches[1] / fb->format->cpp[1];
5308
5309                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5310                 address->video_progressive.luma_addr.low_part =
5311                         lower_32_bits(luma_addr);
5312                 address->video_progressive.luma_addr.high_part =
5313                         upper_32_bits(luma_addr);
5314                 address->video_progressive.chroma_addr.low_part =
5315                         lower_32_bits(chroma_addr);
5316                 address->video_progressive.chroma_addr.high_part =
5317                         upper_32_bits(chroma_addr);
5318         }
5319
5320         if (adev->family >= AMDGPU_FAMILY_AI) {
5321                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5322                                                                 rotation, plane_size,
5323                                                                 tiling_info, dcc,
5324                                                                 address,
5325                                                                 force_disable_dcc);
5326                 if (ret)
5327                         return ret;
5328         } else {
5329                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5330         }
5331
5332         return 0;
5333 }
5334
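/*
 * Derive DC blending inputs from DRM plane state. For example, an
 * ARGB8888 overlay with pixel_blend_mode DRM_MODE_BLEND_PREMULTI and a
 * plane alpha of 0x8000 yields per_pixel_alpha = true, global_alpha =
 * true and global_alpha_value = 0x80 (the 16-bit DRM alpha truncated
 * to the 8 bits DC consumes).
 */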
5335 static void
5336 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5337                                bool *per_pixel_alpha, bool *global_alpha,
5338                                int *global_alpha_value)
5339 {
5340         *per_pixel_alpha = false;
5341         *global_alpha = false;
5342         *global_alpha_value = 0xff;
5343
5344         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5345                 return;
5346
5347         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5348                 static const uint32_t alpha_formats[] = {
5349                         DRM_FORMAT_ARGB8888,
5350                         DRM_FORMAT_RGBA8888,
5351                         DRM_FORMAT_ABGR8888,
5352                 };
5353                 uint32_t format = plane_state->fb->format->format;
5354                 unsigned int i;
5355
5356                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5357                         if (format == alpha_formats[i]) {
5358                                 *per_pixel_alpha = true;
5359                                 break;
5360                         }
5361                 }
5362         }
5363
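        /*
         * drm_plane_state.alpha is 16 bit (0x0000-0xffff) while DC takes
         * an 8-bit global alpha, so the shift below keeps the high byte:
         * e.g. 0x8000 (roughly 50%) becomes 0x80.
         */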
5364         if (plane_state->alpha < 0xffff) {
5365                 *global_alpha = true;
5366                 *global_alpha_value = plane_state->alpha >> 8;
5367         }
5368 }
5369
5370 static int
5371 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5372                             const enum surface_pixel_format format,
5373                             enum dc_color_space *color_space)
5374 {
5375         bool full_range;
5376
5377         *color_space = COLOR_SPACE_SRGB;
5378
5379         /* DRM color properties only affect non-RGB formats. */
5380         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5381                 return 0;
5382
5383         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5384
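        /*
         * Full range uses the whole code space (0-255 at 8 bpc), while
         * limited range confines luma to 16-235; that distinction is why
         * each encoding below has a _LIMITED variant.
         */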
5385         switch (plane_state->color_encoding) {
5386         case DRM_COLOR_YCBCR_BT601:
5387                 if (full_range)
5388                         *color_space = COLOR_SPACE_YCBCR601;
5389                 else
5390                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5391                 break;
5392
5393         case DRM_COLOR_YCBCR_BT709:
5394                 if (full_range)
5395                         *color_space = COLOR_SPACE_YCBCR709;
5396                 else
5397                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5398                 break;
5399
5400         case DRM_COLOR_YCBCR_BT2020:
5401                 if (full_range)
5402                         *color_space = COLOR_SPACE_2020_YCBCR;
5403                 else
5404                         return -EINVAL;
5405                 break;
5406
5407         default:
5408                 return -EINVAL;
5409         }
5410
5411         return 0;
5412 }
5413
5414 static int
5415 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5416                             const struct drm_plane_state *plane_state,
5417                             const uint64_t tiling_flags,
5418                             struct dc_plane_info *plane_info,
5419                             struct dc_plane_address *address,
5420                             bool tmz_surface,
5421                             bool force_disable_dcc)
5422 {
5423         const struct drm_framebuffer *fb = plane_state->fb;
5424         const struct amdgpu_framebuffer *afb =
5425                 to_amdgpu_framebuffer(plane_state->fb);
5426         int ret;
5427
5428         memset(plane_info, 0, sizeof(*plane_info));
5429
5430         switch (fb->format->format) {
5431         case DRM_FORMAT_C8:
5432                 plane_info->format =
5433                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5434                 break;
5435         case DRM_FORMAT_RGB565:
5436                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5437                 break;
5438         case DRM_FORMAT_XRGB8888:
5439         case DRM_FORMAT_ARGB8888:
5440                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5441                 break;
5442         case DRM_FORMAT_XRGB2101010:
5443         case DRM_FORMAT_ARGB2101010:
5444                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5445                 break;
5446         case DRM_FORMAT_XBGR2101010:
5447         case DRM_FORMAT_ABGR2101010:
5448                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5449                 break;
5450         case DRM_FORMAT_XBGR8888:
5451         case DRM_FORMAT_ABGR8888:
5452                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5453                 break;
5454         case DRM_FORMAT_NV21:
5455                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5456                 break;
5457         case DRM_FORMAT_NV12:
5458                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5459                 break;
5460         case DRM_FORMAT_P010:
5461                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5462                 break;
5463         case DRM_FORMAT_XRGB16161616F:
5464         case DRM_FORMAT_ARGB16161616F:
5465                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5466                 break;
5467         case DRM_FORMAT_XBGR16161616F:
5468         case DRM_FORMAT_ABGR16161616F:
5469                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5470                 break;
5471         case DRM_FORMAT_XRGB16161616:
5472         case DRM_FORMAT_ARGB16161616:
5473                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5474                 break;
5475         case DRM_FORMAT_XBGR16161616:
5476         case DRM_FORMAT_ABGR16161616:
5477                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5478                 break;
5479         default:
5480                 DRM_ERROR(
5481                         "Unsupported screen format %p4cc\n",
5482                         &fb->format->format);
5483                 return -EINVAL;
5484         }
5485
5486         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5487         case DRM_MODE_ROTATE_0:
5488                 plane_info->rotation = ROTATION_ANGLE_0;
5489                 break;
5490         case DRM_MODE_ROTATE_90:
5491                 plane_info->rotation = ROTATION_ANGLE_90;
5492                 break;
5493         case DRM_MODE_ROTATE_180:
5494                 plane_info->rotation = ROTATION_ANGLE_180;
5495                 break;
5496         case DRM_MODE_ROTATE_270:
5497                 plane_info->rotation = ROTATION_ANGLE_270;
5498                 break;
5499         default:
5500                 plane_info->rotation = ROTATION_ANGLE_0;
5501                 break;
5502         }
5503
5504         plane_info->visible = true;
5505         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5506
5507         plane_info->layer_index = 0;
5508
5509         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5510                                           &plane_info->color_space);
5511         if (ret)
5512                 return ret;
5513
5514         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5515                                            plane_info->rotation, tiling_flags,
5516                                            &plane_info->tiling_info,
5517                                            &plane_info->plane_size,
5518                                            &plane_info->dcc, address, tmz_surface,
5519                                            force_disable_dcc);
5520         if (ret)
5521                 return ret;
5522
5523         fill_blending_from_plane_state(
5524                 plane_state, &plane_info->per_pixel_alpha,
5525                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5526
5527         return 0;
5528 }
5529
5530 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5531                                     struct dc_plane_state *dc_plane_state,
5532                                     struct drm_plane_state *plane_state,
5533                                     struct drm_crtc_state *crtc_state)
5534 {
5535         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5536         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5537         struct dc_scaling_info scaling_info;
5538         struct dc_plane_info plane_info;
5539         int ret;
5540         bool force_disable_dcc = false;
5541
5542         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5543         if (ret)
5544                 return ret;
5545
5546         dc_plane_state->src_rect = scaling_info.src_rect;
5547         dc_plane_state->dst_rect = scaling_info.dst_rect;
5548         dc_plane_state->clip_rect = scaling_info.clip_rect;
5549         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5550
5551         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5552         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5553                                           afb->tiling_flags,
5554                                           &plane_info,
5555                                           &dc_plane_state->address,
5556                                           afb->tmz_surface,
5557                                           force_disable_dcc);
5558         if (ret)
5559                 return ret;
5560
5561         dc_plane_state->format = plane_info.format;
5562         dc_plane_state->color_space = plane_info.color_space;
5564         dc_plane_state->plane_size = plane_info.plane_size;
5565         dc_plane_state->rotation = plane_info.rotation;
5566         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5567         dc_plane_state->stereo_format = plane_info.stereo_format;
5568         dc_plane_state->tiling_info = plane_info.tiling_info;
5569         dc_plane_state->visible = plane_info.visible;
5570         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5571         dc_plane_state->global_alpha = plane_info.global_alpha;
5572         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5573         dc_plane_state->dcc = plane_info.dcc;
5574         dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
5575         dc_plane_state->flip_int_enabled = true;
5576
5577         /*
5578          * Always set input transfer function, since plane state is refreshed
5579          * every time.
5580          */
5581         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5582         if (ret)
5583                 return ret;
5584
5585         return 0;
5586 }
5587
5588 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5589                                            const struct dm_connector_state *dm_state,
5590                                            struct dc_stream_state *stream)
5591 {
5592         enum amdgpu_rmx_type rmx_type;
5593
5594         struct rect src = { 0 }; /* viewport in composition space */
5595         struct rect dst = { 0 }; /* stream addressable area */
5596
5597         /* no mode; nothing to be done */
5598         if (!mode)
5599                 return;
5600
5601         /* Full screen scaling by default */
5602         src.width = mode->hdisplay;
5603         src.height = mode->vdisplay;
5604         dst.width = stream->timing.h_addressable;
5605         dst.height = stream->timing.v_addressable;
5606
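        /*
         * Worked example for the aspect-ratio math below (illustrative
         * numbers): a 1280x1024 source on a 1920x1080 stream compares
         * 1280 * 1080 < 1024 * 1920, so the width is shrunk instead:
         * dst.width = 1280 * 1080 / 1024 = 1350, centered at x = 285.
         */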
5607         if (dm_state) {
5608                 rmx_type = dm_state->scaling;
5609                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5610                         if (src.width * dst.height <
5611                                         src.height * dst.width) {
5612                                 /* height needs less upscaling/more downscaling */
5613                                 dst.width = src.width *
5614                                                 dst.height / src.height;
5615                         } else {
5616                                 /* width needs less upscaling/more downscaling */
5617                                 dst.height = src.height *
5618                                                 dst.width / src.width;
5619                         }
5620                 } else if (rmx_type == RMX_CENTER) {
5621                         dst = src;
5622                 }
5623
5624                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5625                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5626
5627                 if (dm_state->underscan_enable) {
5628                         dst.x += dm_state->underscan_hborder / 2;
5629                         dst.y += dm_state->underscan_vborder / 2;
5630                         dst.width -= dm_state->underscan_hborder;
5631                         dst.height -= dm_state->underscan_vborder;
5632                 }
5633         }
5634
5635         stream->src = src;
5636         stream->dst = dst;
5637
5638         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5639                       dst.x, dst.y, dst.width, dst.height);
5640
5641 }
5642
5643 static enum dc_color_depth
5644 convert_color_depth_from_display_info(const struct drm_connector *connector,
5645                                       bool is_y420, int requested_bpc)
5646 {
5647         uint8_t bpc;
5648
5649         if (is_y420) {
5650                 bpc = 8;
5651
5652                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5653                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5654                         bpc = 16;
5655                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5656                         bpc = 12;
5657                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5658                         bpc = 10;
5659         } else {
5660                 bpc = (uint8_t)connector->display_info.bpc;
5661                 /* Assume 8 bpc by default if no bpc is specified. */
5662                 bpc = bpc ? bpc : 8;
5663         }
5664
5665         if (requested_bpc > 0) {
5666                 /*
5667                  * Cap display bpc based on the user requested value.
5668                  *
5669                  * The value for state->max_bpc may not be correctly updated
5670                  * depending on when the connector gets added to the state
5671                  * or if this was called outside of atomic check, so it
5672                  * can't be used directly.
5673                  */
5674                 bpc = min_t(u8, bpc, requested_bpc);
5675
5676                 /* Round down to the nearest even number. */
5677                 bpc = bpc - (bpc & 1);
5678         }
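        /*
         * Illustration (assumed values): a sink reporting 12 bpc with a
         * requested max_bpc of 11 yields min(12, 11) = 11, which the
         * rounding above reduces to an even 10 bpc.
         */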
5679
5680         switch (bpc) {
5681         case 0:
5682                  * Temporary workaround: DRM doesn't parse color depth for
5683                  * EDID revisions before 1.4.
5684                  * TODO: Fix EDID parsing
5685                  * TODO: Fix edid parsing
5686                  */
5687                 return COLOR_DEPTH_888;
5688         case 6:
5689                 return COLOR_DEPTH_666;
5690         case 8:
5691                 return COLOR_DEPTH_888;
5692         case 10:
5693                 return COLOR_DEPTH_101010;
5694         case 12:
5695                 return COLOR_DEPTH_121212;
5696         case 14:
5697                 return COLOR_DEPTH_141414;
5698         case 16:
5699                 return COLOR_DEPTH_161616;
5700         default:
5701                 return COLOR_DEPTH_UNDEFINED;
5702         }
5703 }
5704
5705 static enum dc_aspect_ratio
5706 get_aspect_ratio(const struct drm_display_mode *mode_in)
5707 {
5708         /* 1-1 mapping, since both enums follow the HDMI spec. */
5709         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5710 }
5711
5712 static enum dc_color_space
5713 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5714 {
5715         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5716
5717         switch (dc_crtc_timing->pixel_encoding) {
5718         case PIXEL_ENCODING_YCBCR422:
5719         case PIXEL_ENCODING_YCBCR444:
5720         case PIXEL_ENCODING_YCBCR420:
5721         {
5722                 /*
5723                  * 27030 kHz is the separation point between HDTV and
5724                  * SDTV according to the HDMI spec; we use YCbCr709 and
5725                  * YCbCr601 respectively.
5726                  */
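                /*
                 * E.g. 480p runs at 27.027 MHz (270270 in 100 Hz units),
                 * just below the cut-off, and gets YCbCr601, while
                 * 1080p60 at 148.5 MHz gets YCbCr709.
                 */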
5727                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5728                         if (dc_crtc_timing->flags.Y_ONLY)
5729                                 color_space =
5730                                         COLOR_SPACE_YCBCR709_LIMITED;
5731                         else
5732                                 color_space = COLOR_SPACE_YCBCR709;
5733                 } else {
5734                         if (dc_crtc_timing->flags.Y_ONLY)
5735                                 color_space =
5736                                         COLOR_SPACE_YCBCR601_LIMITED;
5737                         else
5738                                 color_space = COLOR_SPACE_YCBCR601;
5739                 }
5740
5741         }
5742         break;
5743         case PIXEL_ENCODING_RGB:
5744                 color_space = COLOR_SPACE_SRGB;
5745                 break;
5746
5747         default:
5748                 WARN_ON(1);
5749                 break;
5750         }
5751
5752         return color_space;
5753 }
5754
5755 static bool adjust_colour_depth_from_display_info(
5756         struct dc_crtc_timing *timing_out,
5757         const struct drm_display_info *info)
5758 {
5759         enum dc_color_depth depth = timing_out->display_color_depth;
5760         int normalized_clk;
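        /*
         * Step down from the current depth until the depth-scaled pixel
         * clock fits the sink's TMDS limit. Illustration with assumed
         * numbers: 4k60 is 594000 kHz, which at 10 bpc scales to
         * 594000 * 30 / 24 = 742500 kHz; against a 600000 kHz
         * max_tmds_clock the loop retries and settles at 8 bpc.
         */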
5761         do {
5762                 normalized_clk = timing_out->pix_clk_100hz / 10;
5763                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5764                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5765                         normalized_clk /= 2;
5766                 /* Adjust the pixel clock per the HDMI spec based on colour depth */
5767                 switch (depth) {
5768                 case COLOR_DEPTH_888:
5769                         break;
5770                 case COLOR_DEPTH_101010:
5771                         normalized_clk = (normalized_clk * 30) / 24;
5772                         break;
5773                 case COLOR_DEPTH_121212:
5774                         normalized_clk = (normalized_clk * 36) / 24;
5775                         break;
5776                 case COLOR_DEPTH_161616:
5777                         normalized_clk = (normalized_clk * 48) / 24;
5778                         break;
5779                 default:
5780                         /* The above depths are the only ones valid for HDMI. */
5781                         return false;
5782                 }
5783                 if (normalized_clk <= info->max_tmds_clock) {
5784                         timing_out->display_color_depth = depth;
5785                         return true;
5786                 }
5787         } while (--depth > COLOR_DEPTH_666);
5788         return false;
5789 }
5790
5791 static void fill_stream_properties_from_drm_display_mode(
5792         struct dc_stream_state *stream,
5793         const struct drm_display_mode *mode_in,
5794         const struct drm_connector *connector,
5795         const struct drm_connector_state *connector_state,
5796         const struct dc_stream_state *old_stream,
5797         int requested_bpc)
5798 {
5799         struct dc_crtc_timing *timing_out = &stream->timing;
5800         const struct drm_display_info *info = &connector->display_info;
5801         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5802         struct hdmi_vendor_infoframe hv_frame;
5803         struct hdmi_avi_infoframe avi_frame;
5804
5805         memset(&hv_frame, 0, sizeof(hv_frame));
5806         memset(&avi_frame, 0, sizeof(avi_frame));
5807
5808         timing_out->h_border_left = 0;
5809         timing_out->h_border_right = 0;
5810         timing_out->v_border_top = 0;
5811         timing_out->v_border_bottom = 0;
5812         /* TODO: un-hardcode */
5813         if (drm_mode_is_420_only(info, mode_in)
5814                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5815                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5816         else if (drm_mode_is_420_also(info, mode_in)
5817                         && aconnector->force_yuv420_output)
5818                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5819         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5820                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5821                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5822         else
5823                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5824
5825         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5826         timing_out->display_color_depth = convert_color_depth_from_display_info(
5827                 connector,
5828                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5829                 requested_bpc);
5830         timing_out->scan_type = SCANNING_TYPE_NODATA;
5831         timing_out->hdmi_vic = 0;
5832
5833         if (old_stream) {
5834                 timing_out->vic = old_stream->timing.vic;
5835                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5836                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5837         } else {
5838                 timing_out->vic = drm_match_cea_mode(mode_in);
5839                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5840                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5841                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5842                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5843         }
5844
5845         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5846                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5847                 timing_out->vic = avi_frame.video_code;
5848                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5849                 timing_out->hdmi_vic = hv_frame.vic;
5850         }
5851
5852         if (is_freesync_video_mode(mode_in, aconnector)) {
5853                 timing_out->h_addressable = mode_in->hdisplay;
5854                 timing_out->h_total = mode_in->htotal;
5855                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5856                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5857                 timing_out->v_total = mode_in->vtotal;
5858                 timing_out->v_addressable = mode_in->vdisplay;
5859                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5860                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5861                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5862         } else {
5863                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5864                 timing_out->h_total = mode_in->crtc_htotal;
5865                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5866                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5867                 timing_out->v_total = mode_in->crtc_vtotal;
5868                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5869                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5870                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5871                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5872         }
5873
5874         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5875
5876         stream->output_color_space = get_output_color_space(timing_out);
5877
5878         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5879         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5880         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5881                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5882                     drm_mode_is_420_also(info, mode_in) &&
5883                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5884                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5885                         adjust_colour_depth_from_display_info(timing_out, info);
5886                 }
5887         }
5888 }
5889
5890 static void fill_audio_info(struct audio_info *audio_info,
5891                             const struct drm_connector *drm_connector,
5892                             const struct dc_sink *dc_sink)
5893 {
5894         int i = 0;
5895         int cea_revision = 0;
5896         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5897
5898         audio_info->manufacture_id = edid_caps->manufacturer_id;
5899         audio_info->product_id = edid_caps->product_id;
5900
5901         cea_revision = drm_connector->display_info.cea_rev;
5902
5903         strscpy(audio_info->display_name,
5904                 edid_caps->display_name,
5905                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5906
5907         if (cea_revision >= 3) {
5908                 audio_info->mode_count = edid_caps->audio_mode_count;
5909
5910                 for (i = 0; i < audio_info->mode_count; ++i) {
5911                         audio_info->modes[i].format_code =
5912                                         (enum audio_format_code)
5913                                         (edid_caps->audio_modes[i].format_code);
5914                         audio_info->modes[i].channel_count =
5915                                         edid_caps->audio_modes[i].channel_count;
5916                         audio_info->modes[i].sample_rates.all =
5917                                         edid_caps->audio_modes[i].sample_rate;
5918                         audio_info->modes[i].sample_size =
5919                                         edid_caps->audio_modes[i].sample_size;
5920                 }
5921         }
5922
5923         audio_info->flags.all = edid_caps->speaker_flags;
5924
5925         /* TODO: We only check progressive mode; check interlaced mode too */
5926         if (drm_connector->latency_present[0]) {
5927                 audio_info->video_latency = drm_connector->video_latency[0];
5928                 audio_info->audio_latency = drm_connector->audio_latency[0];
5929         }
5930
5931         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5932
5933 }
5934
5935 static void
5936 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5937                                       struct drm_display_mode *dst_mode)
5938 {
5939         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5940         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5941         dst_mode->crtc_clock = src_mode->crtc_clock;
5942         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5943         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5944         dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5945         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5946         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5947         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5948         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5949         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5950         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5951         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5952         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5953 }
5954
5955 static void
5956 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5957                                         const struct drm_display_mode *native_mode,
5958                                         bool scale_enabled)
5959 {
5960         if (scale_enabled) {
5961                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5962         } else if (native_mode->clock == drm_mode->clock &&
5963                         native_mode->htotal == drm_mode->htotal &&
5964                         native_mode->vtotal == drm_mode->vtotal) {
5965                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5966         } else {
5967                 /* no scaling and no amdgpu-inserted mode; nothing to patch */
5968         }
5969 }
5970
5971 static struct dc_sink *
5972 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5973 {
5974         struct dc_sink_init_data sink_init_data = { 0 };
5975         struct dc_sink *sink = NULL;
5976         sink_init_data.link = aconnector->dc_link;
5977         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5978
5979         sink = dc_sink_create(&sink_init_data);
5980         if (!sink) {
5981                 DRM_ERROR("Failed to create sink!\n");
5982                 return NULL;
5983         }
5984         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5985
5986         return sink;
5987 }
5988
5989 static void set_multisync_trigger_params(
5990                 struct dc_stream_state *stream)
5991 {
5992         struct dc_stream_state *master = NULL;
5993
5994         if (stream->triggered_crtc_reset.enabled) {
5995                 master = stream->triggered_crtc_reset.event_source;
5996                 stream->triggered_crtc_reset.event =
5997                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5998                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5999                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6000         }
6001 }
6002
6003 static void set_master_stream(struct dc_stream_state *stream_set[],
6004                               int stream_count)
6005 {
6006         int j, highest_rfr = 0, master_stream = 0;
6007
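        /*
         * The stream with the highest refresh rate becomes the master.
         * E.g. a 1080p60 CEA timing gives (1485000 * 100) /
         * (2200 * 1125) = 60 Hz.
         */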
6008         for (j = 0;  j < stream_count; j++) {
6009                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6010                         int refresh_rate = 0;
6011
6012                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6013                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6014                         if (refresh_rate > highest_rfr) {
6015                                 highest_rfr = refresh_rate;
6016                                 master_stream = j;
6017                         }
6018                 }
6019         }
6020         for (j = 0;  j < stream_count; j++) {
6021                 if (stream_set[j])
6022                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6023         }
6024 }
6025
6026 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6027 {
6028         int i = 0;
6029         struct dc_stream_state *stream;
6030
6031         if (context->stream_count < 2)
6032                 return;
6033         for (i = 0; i < context->stream_count; i++) {
6034                 if (!context->streams[i])
6035                         continue;
6036                 /*
6037                  * TODO: add a function to read AMD VSDB bits and set
6038                  * crtc_sync_master.multi_sync_enabled flag
6039                  * For now it's set to false
6040                  */
6041         }
6042
6043         set_master_stream(context->streams, context->stream_count);
6044
6045         for (i = 0; i < context->stream_count; i++) {
6046                 stream = context->streams[i];
6047
6048                 if (!stream)
6049                         continue;
6050
6051                 set_multisync_trigger_params(stream);
6052         }
6053 }
6054
6055 #if defined(CONFIG_DRM_AMD_DC_DCN)
6056 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6057                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6058                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6059 {
6060         stream->timing.flags.DSC = 0;
6061
6062         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6063                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6064                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6065                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6066                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6067                                       dsc_caps);
6068         }
6069 }
6070
6071 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6072                                     struct dc_sink *sink, struct dc_stream_state *stream,
6073                                     struct dsc_dec_dpcd_caps *dsc_caps,
6074                                     uint32_t max_dsc_target_bpp_limit_override)
6075 {
6076         const struct dc_link_settings *verified_link_cap = NULL;
6077         uint32_t link_bw_in_kbps;
6078         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6079         struct dc *dc = sink->ctx->dc;
6080         struct dc_dsc_bw_range bw_range = {0};
6081         struct dc_dsc_config dsc_cfg = {0};
6082
6083         verified_link_cap = dc_link_get_link_cap(stream->link);
6084         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6085         edp_min_bpp_x16 = 8 * 16;
6086         edp_max_bpp_x16 = 8 * 16;
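        /*
         * bpp_x16 values are bits per pixel in 1/16 bpp units, so
         * 8 * 16 = 128 above encodes a target of 8.0 bpp.
         */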
6087
6088         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6089                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6090
6091         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6092                 edp_min_bpp_x16 = edp_max_bpp_x16;
6093
6094         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6095                                 dc->debug.dsc_min_slice_height_override,
6096                                 edp_min_bpp_x16, edp_max_bpp_x16,
6097                                 dsc_caps,
6098                                 &stream->timing,
6099                                 &bw_range)) {
6100
6101                 if (bw_range.max_kbps < link_bw_in_kbps) {
6102                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6103                                         dsc_caps,
6104                                         dc->debug.dsc_min_slice_height_override,
6105                                         max_dsc_target_bpp_limit_override,
6106                                         0,
6107                                         &stream->timing,
6108                                         &dsc_cfg)) {
6109                                 stream->timing.dsc_cfg = dsc_cfg;
6110                                 stream->timing.flags.DSC = 1;
6111                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6112                         }
6113                         return;
6114                 }
6115         }
6116
6117         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6118                                 dsc_caps,
6119                                 dc->debug.dsc_min_slice_height_override,
6120                                 max_dsc_target_bpp_limit_override,
6121                                 link_bw_in_kbps,
6122                                 &stream->timing,
6123                                 &dsc_cfg)) {
6124                 stream->timing.dsc_cfg = dsc_cfg;
6125                 stream->timing.flags.DSC = 1;
6126         }
6127 }
6128
6129 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6130                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6131                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6132 {
6133         struct drm_connector *drm_connector = &aconnector->base;
6134         uint32_t link_bandwidth_kbps;
6135         uint32_t max_dsc_target_bpp_limit_override = 0;
6136         struct dc *dc = sink->ctx->dc;
6137
6138         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6139                                                         dc_link_get_link_cap(aconnector->dc_link));
6140
6141         if (stream->link && stream->link->local_sink)
6142                 max_dsc_target_bpp_limit_override =
6143                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6144
6145         /* Set DSC policy according to dsc_clock_en */
6146         dc_dsc_policy_set_enable_dsc_when_not_needed(
6147                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6148
6149         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6150             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6151
6152                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6153
6154         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6155
6156                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6157                                                 dsc_caps,
6158                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6159                                                 max_dsc_target_bpp_limit_override,
6160                                                 link_bandwidth_kbps,
6161                                                 &stream->timing,
6162                                                 &stream->timing.dsc_cfg)) {
6163                         stream->timing.flags.DSC = 1;
6164                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6165                 }
6166         }
6167
6168         /* Overwrite the stream flag if DSC is enabled through debugfs */
6169         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6170                 stream->timing.flags.DSC = 1;
6171
6172         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6173                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6174
6175         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6176                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6177
6178         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6179                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6180 }
6181 #endif /* CONFIG_DRM_AMD_DC_DCN */
6182
6183 /**
6184  * DOC: FreeSync Video
6185  *
6186  * When a userspace application wants to play a video, the content follows a
6187  * standard format definition that usually specifies the FPS for that format.
6188  * The below list illustrates some video format and the expected FPS,
6189  * The list below illustrates some video formats and their expected FPS,
6190  *
6191  * - TV/NTSC (23.976 FPS)
6192  * - Cinema (24 FPS)
6193  * - TV/PAL (25 FPS)
6194  * - TV/NTSC (29.97 FPS)
6195  * - TV/NTSC (30 FPS)
6196  * - Cinema HFR (48 FPS)
6197  * - TV/PAL (50 FPS)
6198  * - Commonly used (60 FPS)
6199  * - Multiples of 24 (48,72,96,120 FPS)
6200  *
6201  * The list of standard video formats is not large and can be added to the
6202  * connector modeset list beforehand. With that, userspace can leverage
6203  * FreeSync to extend the front porch in order to attain the target refresh
6204  * rate. Such a switch will happen seamlessly, without screen blanking or
6205  * reprogramming of the output in any other way. If userspace requests a
6206  * modesetting change compatible with FreeSync modes that only differ in the
6207  * refresh rate, DC will skip the full update and avoid any blink during the
6208  * transition. For example, the video player can change the modesetting from
6209  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6210  * causing any display blink. The same concept can be applied to a mode
6211  * setting change.
6212  */
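/*
 * Illustration with assumed numbers: a 1920x1080@60 base mode with
 * vtotal = 1125 can present 48 FPS content by stretching the vertical
 * front porch until vtotal = 1125 * 60 / 48 ~= 1406 lines, leaving the
 * pixel clock and all horizontal timings untouched.
 */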
6213 static struct drm_display_mode *
6214 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6215                           bool use_probed_modes)
6216 {
6217         struct drm_display_mode *m, *m_pref = NULL;
6218         u16 current_refresh, highest_refresh;
6219         struct list_head *list_head = use_probed_modes ?
6220                                                     &aconnector->base.probed_modes :
6221                                                     &aconnector->base.modes;
6222
6223         if (aconnector->freesync_vid_base.clock != 0)
6224                 return &aconnector->freesync_vid_base;
6225
6226         /* Find the preferred mode */
6227         list_for_each_entry(m, list_head, head) {
6228                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6229                         m_pref = m;
6230                         break;
6231                 }
6232         }
6233
6234         if (!m_pref) {
6235                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6236                 m_pref = list_first_entry_or_null(
6237                         &aconnector->base.modes, struct drm_display_mode, head);
6238                 if (!m_pref) {
6239                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6240                         return NULL;
6241                 }
6242         }
6243
6244         highest_refresh = drm_mode_vrefresh(m_pref);
6245
6246         /*
6247          * Find the mode with highest refresh rate with same resolution.
6248          * For some monitors, preferred mode is not the mode with highest
6249          * supported refresh rate.
6250          */
6251         list_for_each_entry(m, list_head, head) {
6252                 current_refresh  = drm_mode_vrefresh(m);
6253
6254                 if (m->hdisplay == m_pref->hdisplay &&
6255                     m->vdisplay == m_pref->vdisplay &&
6256                     highest_refresh < current_refresh) {
6257                         highest_refresh = current_refresh;
6258                         m_pref = m;
6259                 }
6260         }
6261
6262         aconnector->freesync_vid_base = *m_pref;
6263         return m_pref;
6264 }
6265
6266 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6267                                    struct amdgpu_dm_connector *aconnector)
6268 {
6269         struct drm_display_mode *high_mode;
6270         int timing_diff;
6271
6272         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6273         if (!high_mode || !mode)
6274                 return false;
6275
6276         timing_diff = high_mode->vtotal - mode->vtotal;
6277
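        /*
         * A FreeSync video mode may differ from its base mode only in the
         * vertical front porch; every other timing field must match, which
         * the checks below enforce via timing_diff.
         */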
6278         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6279             high_mode->hdisplay != mode->hdisplay ||
6280             high_mode->vdisplay != mode->vdisplay ||
6281             high_mode->hsync_start != mode->hsync_start ||
6282             high_mode->hsync_end != mode->hsync_end ||
6283             high_mode->htotal != mode->htotal ||
6284             high_mode->hskew != mode->hskew ||
6285             high_mode->vscan != mode->vscan ||
6286             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6287             high_mode->vsync_end - mode->vsync_end != timing_diff)
6288                 return false;
6289         else
6290                 return true;
6291 }
6292
6293 static struct dc_stream_state *
6294 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6295                        const struct drm_display_mode *drm_mode,
6296                        const struct dm_connector_state *dm_state,
6297                        const struct dc_stream_state *old_stream,
6298                        int requested_bpc)
6299 {
6300         struct drm_display_mode *preferred_mode = NULL;
6301         struct drm_connector *drm_connector;
6302         const struct drm_connector_state *con_state =
6303                 dm_state ? &dm_state->base : NULL;
6304         struct dc_stream_state *stream = NULL;
6305         struct drm_display_mode mode = *drm_mode;
6306         struct drm_display_mode saved_mode;
6307         struct drm_display_mode *freesync_mode = NULL;
6308         bool native_mode_found = false;
6309         bool recalculate_timing = false;
6310         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6311         int mode_refresh;
6312         int preferred_refresh = 0;
6313 #if defined(CONFIG_DRM_AMD_DC_DCN)
6314         struct dsc_dec_dpcd_caps dsc_caps;
6315 #endif
6316         struct dc_sink *sink = NULL;
6317
6318         memset(&saved_mode, 0, sizeof(saved_mode));
6319
6320         if (aconnector == NULL) {
6321                 DRM_ERROR("aconnector is NULL!\n");
6322                 return stream;
6323         }
6324
6325         drm_connector = &aconnector->base;
6326
6327         if (!aconnector->dc_sink) {
6328                 sink = create_fake_sink(aconnector);
6329                 if (!sink)
6330                         return stream;
6331         } else {
6332                 sink = aconnector->dc_sink;
6333                 dc_sink_retain(sink);
6334         }
6335
6336         stream = dc_create_stream_for_sink(sink);
6337
6338         if (stream == NULL) {
6339                 DRM_ERROR("Failed to create stream for sink!\n");
6340                 goto finish;
6341         }
6342
6343         stream->dm_stream_context = aconnector;
6344
6345         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6346                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6347
6348         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6349                 /* Search for preferred mode */
6350                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6351                         native_mode_found = true;
6352                         break;
6353                 }
6354         }
6355         if (!native_mode_found)
6356                 preferred_mode = list_first_entry_or_null(
6357                                 &aconnector->base.modes,
6358                                 struct drm_display_mode,
6359                                 head);
6360
6361         mode_refresh = drm_mode_vrefresh(&mode);
6362
6363         if (preferred_mode == NULL) {
6364                 /*
6365                  * This may not be an error; the use case is when we have no
6366                  * usermode calls to reset and set mode upon hotplug. In this
6367                  * case, we call set mode ourselves to restore the previous mode,
6368                  * and the mode list may not be filled in time.
6369                  */
6370                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6371         } else {
6372                 recalculate_timing = amdgpu_freesync_vid_mode &&
6373                                  is_freesync_video_mode(&mode, aconnector);
6374                 if (recalculate_timing) {
6375                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6376                         saved_mode = mode;
6377                         mode = *freesync_mode;
6378                 } else {
6379                         decide_crtc_timing_for_drm_display_mode(
6380                                 &mode, preferred_mode, scale);
6381
6382                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6383                 }
6384         }
6385
6386         if (recalculate_timing)
6387                 drm_mode_set_crtcinfo(&saved_mode, 0);
6388         else if (!dm_state)
6389                 drm_mode_set_crtcinfo(&mode, 0);
6390
6391         /*
6392          * If scaling is enabled and the refresh rate didn't change,
6393          * we copy the VIC and polarities of the old timings.
6394          */
6395         if (!scale || mode_refresh != preferred_refresh)
6396                 fill_stream_properties_from_drm_display_mode(
6397                         stream, &mode, &aconnector->base, con_state, NULL,
6398                         requested_bpc);
6399         else
6400                 fill_stream_properties_from_drm_display_mode(
6401                         stream, &mode, &aconnector->base, con_state, old_stream,
6402                         requested_bpc);
6403
6404 #if defined(CONFIG_DRM_AMD_DC_DCN)
6405         /* SST DSC determination policy */
6406         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6407         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6408                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6409 #endif
6410
6411         update_stream_scaling_settings(&mode, dm_state, stream);
6412
6413         fill_audio_info(
6414                 &stream->audio_info,
6415                 drm_connector,
6416                 sink);
6417
6418         update_stream_signal(stream, sink);
6419
6420         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6421                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6422
6423         if (stream->link->psr_settings.psr_feature_enabled) {
6424                 /*
6425                  * Decide whether the stream supports VSC SDP colorimetry
6426                  * before building the VSC info packet.
6427                  */
6428                 stream->use_vsc_sdp_for_colorimetry = false;
6429                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6430                         stream->use_vsc_sdp_for_colorimetry =
6431                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6432                 } else {
6433                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6434                                 stream->use_vsc_sdp_for_colorimetry = true;
6435                 }
6436                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6437                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6438
6439         }
6440 finish:
6441         dc_sink_release(sink);
6442
6443         return stream;
6444 }
6445
6446 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6447 {
6448         drm_crtc_cleanup(crtc);
6449         kfree(crtc);
6450 }
6451
6452 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6453                                   struct drm_crtc_state *state)
6454 {
6455         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6456
6457         /* TODO: Destroy dc_stream objects once the stream object is flattened */
6458         if (cur->stream)
6459                 dc_stream_release(cur->stream);
6460
6461
6462         __drm_atomic_helper_crtc_destroy_state(state);
6463
6464
6465         kfree(state);
6466 }
6467
6468 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6469 {
6470         struct dm_crtc_state *state;
6471
6472         if (crtc->state)
6473                 dm_crtc_destroy_state(crtc, crtc->state);
6474
6475         state = kzalloc(sizeof(*state), GFP_KERNEL);
6476         if (WARN_ON(!state))
6477                 return;
6478
6479         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6480 }
6481
6482 static struct drm_crtc_state *
6483 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6484 {
6485         struct dm_crtc_state *state, *cur;
6486
6487         if (WARN_ON(!crtc->state))
6488                 return NULL;
6489
6490         cur = to_dm_crtc_state(crtc->state);
6491
6492         state = kzalloc(sizeof(*state), GFP_KERNEL);
6493         if (!state)
6494                 return NULL;
6495
6496         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6497
6498         if (cur->stream) {
6499                 state->stream = cur->stream;
6500                 dc_stream_retain(state->stream);
6501         }
6502
6503         state->active_planes = cur->active_planes;
6504         state->vrr_infopacket = cur->vrr_infopacket;
6505         state->abm_level = cur->abm_level;
6506         state->vrr_supported = cur->vrr_supported;
6507         state->freesync_config = cur->freesync_config;
6508         state->cm_has_degamma = cur->cm_has_degamma;
6509         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6510         state->force_dpms_off = cur->force_dpms_off;
6511         /* TODO: Duplicate dc_stream once the stream object is flattened */
6512
6513         return &state->base;
6514 }
6515
6516 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6517 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6518 {
6519         crtc_debugfs_init(crtc);
6520
6521         return 0;
6522 }
6523 #endif
6524
6525 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6526 {
6527         enum dc_irq_source irq_source;
6528         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6529         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6530         int rc;
6531
6532         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6533
6534         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6535
6536         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6537                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6538         return rc;
6539 }
6540
6541 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6542 {
6543         enum dc_irq_source irq_source;
6544         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6545         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6546         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6547 #if defined(CONFIG_DRM_AMD_DC_DCN)
6548         struct amdgpu_display_manager *dm = &adev->dm;
6549         struct vblank_control_work *work;
6550 #endif
6551         int rc = 0;
6552
6553         if (enable) {
6554                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6555                 if (amdgpu_dm_vrr_active(acrtc_state))
6556                         rc = dm_set_vupdate_irq(crtc, true);
6557         } else {
6558                 /* vblank irq off -> vupdate irq off */
6559                 rc = dm_set_vupdate_irq(crtc, false);
6560         }
6561
6562         if (rc)
6563                 return rc;
6564
6565         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6566
6567         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6568                 return -EBUSY;
6569
6570         if (amdgpu_in_reset(adev))
6571                 return 0;
6572
6573 #if defined(CONFIG_DRM_AMD_DC_DCN)
6574         if (dm->vblank_control_workqueue) {
6575                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6576                 if (!work)
6577                         return -ENOMEM;
6578
6579                 INIT_WORK(&work->work, vblank_control_worker);
6580                 work->dm = dm;
6581                 work->acrtc = acrtc;
6582                 work->enable = enable;
6583
6584                 if (acrtc_state->stream) {
6585                         dc_stream_retain(acrtc_state->stream);
6586                         work->stream = acrtc_state->stream;
6587                 }
6588
6589                 queue_work(dm->vblank_control_workqueue, &work->work);
6590         }
6591 #endif
6592
6593         return 0;
6594 }
6595
6596 static int dm_enable_vblank(struct drm_crtc *crtc)
6597 {
6598         return dm_set_vblank(crtc, true);
6599 }
6600
6601 static void dm_disable_vblank(struct drm_crtc *crtc)
6602 {
6603         dm_set_vblank(crtc, false);
6604 }
6605
6606 /* Implement only the options currently available for the driver */
6607 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6608         .reset = dm_crtc_reset_state,
6609         .destroy = amdgpu_dm_crtc_destroy,
6610         .set_config = drm_atomic_helper_set_config,
6611         .page_flip = drm_atomic_helper_page_flip,
6612         .atomic_duplicate_state = dm_crtc_duplicate_state,
6613         .atomic_destroy_state = dm_crtc_destroy_state,
6614         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6615         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6616         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6617         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6618         .enable_vblank = dm_enable_vblank,
6619         .disable_vblank = dm_disable_vblank,
6620         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6621 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6622         .late_register = amdgpu_dm_crtc_late_register,
6623 #endif
6624 };
6625
6626 static enum drm_connector_status
6627 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6628 {
6629         bool connected;
6630         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6631
6632         /*
6633          * Notes:
6634          * 1. This interface is NOT called in the context of the HPD irq.
6635          * 2. This interface *is called* in the context of a user-mode ioctl,
6636          * which makes it a bad place for *any* MST-related activity.
6637          */
6638
6639         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6640             !aconnector->fake_enable)
6641                 connected = (aconnector->dc_sink != NULL);
6642         else
6643                 connected = (aconnector->base.force == DRM_FORCE_ON);
6644
6645         update_subconnector_property(aconnector);
6646
6647         return (connected ? connector_status_connected :
6648                         connector_status_disconnected);
6649 }
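
/*
 * Example (a sketch of generic DRM behaviour, not specific to this file):
 * the connector->force value consumed above can be set from userspace
 * through the connector's sysfs status node before detect() runs:
 *
 *   # echo on > /sys/class/drm/card0-DP-1/status      # DRM_FORCE_ON
 *   # echo detect > /sys/class/drm/card0-DP-1/status  # DRM_FORCE_UNSPECIFIED
 */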
6650
6651 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6652                                             struct drm_connector_state *connector_state,
6653                                             struct drm_property *property,
6654                                             uint64_t val)
6655 {
6656         struct drm_device *dev = connector->dev;
6657         struct amdgpu_device *adev = drm_to_adev(dev);
6658         struct dm_connector_state *dm_old_state =
6659                 to_dm_connector_state(connector->state);
6660         struct dm_connector_state *dm_new_state =
6661                 to_dm_connector_state(connector_state);
6662
6663         int ret = -EINVAL;
6664
6665         if (property == dev->mode_config.scaling_mode_property) {
6666                 enum amdgpu_rmx_type rmx_type;
6667
6668                 switch (val) {
6669                 case DRM_MODE_SCALE_CENTER:
6670                         rmx_type = RMX_CENTER;
6671                         break;
6672                 case DRM_MODE_SCALE_ASPECT:
6673                         rmx_type = RMX_ASPECT;
6674                         break;
6675                 case DRM_MODE_SCALE_FULLSCREEN:
6676                         rmx_type = RMX_FULL;
6677                         break;
6678                 case DRM_MODE_SCALE_NONE:
6679                 default:
6680                         rmx_type = RMX_OFF;
6681                         break;
6682                 }
6683
6684                 if (dm_old_state->scaling == rmx_type)
6685                         return 0;
6686
6687                 dm_new_state->scaling = rmx_type;
6688                 ret = 0;
6689         } else if (property == adev->mode_info.underscan_hborder_property) {
6690                 dm_new_state->underscan_hborder = val;
6691                 ret = 0;
6692         } else if (property == adev->mode_info.underscan_vborder_property) {
6693                 dm_new_state->underscan_vborder = val;
6694                 ret = 0;
6695         } else if (property == adev->mode_info.underscan_property) {
6696                 dm_new_state->underscan_enable = val;
6697                 ret = 0;
6698         } else if (property == adev->mode_info.abm_level_property) {
6699                 dm_new_state->abm_level = val;
6700                 ret = 0;
6701         }
6702
6703         return ret;
6704 }
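
/*
 * Usage sketch (the property names are the ones amdgpu registers; the
 * connector id 42 is made up): these are plain atomic connector properties,
 * so modetest from libdrm can exercise the handler above, e.g.:
 *
 *   modetest -M amdgpu -w 42:"underscan":1 -w 42:"underscan hborder":32
 */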
6705
6706 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6707                                             const struct drm_connector_state *state,
6708                                             struct drm_property *property,
6709                                             uint64_t *val)
6710 {
6711         struct drm_device *dev = connector->dev;
6712         struct amdgpu_device *adev = drm_to_adev(dev);
6713         struct dm_connector_state *dm_state =
6714                 to_dm_connector_state(state);
6715         int ret = -EINVAL;
6716
6717         if (property == dev->mode_config.scaling_mode_property) {
6718                 switch (dm_state->scaling) {
6719                 case RMX_CENTER:
6720                         *val = DRM_MODE_SCALE_CENTER;
6721                         break;
6722                 case RMX_ASPECT:
6723                         *val = DRM_MODE_SCALE_ASPECT;
6724                         break;
6725                 case RMX_FULL:
6726                         *val = DRM_MODE_SCALE_FULLSCREEN;
6727                         break;
6728                 case RMX_OFF:
6729                 default:
6730                         *val = DRM_MODE_SCALE_NONE;
6731                         break;
6732                 }
6733                 ret = 0;
6734         } else if (property == adev->mode_info.underscan_hborder_property) {
6735                 *val = dm_state->underscan_hborder;
6736                 ret = 0;
6737         } else if (property == adev->mode_info.underscan_vborder_property) {
6738                 *val = dm_state->underscan_vborder;
6739                 ret = 0;
6740         } else if (property == adev->mode_info.underscan_property) {
6741                 *val = dm_state->underscan_enable;
6742                 ret = 0;
6743         } else if (property == adev->mode_info.abm_level_property) {
6744                 *val = dm_state->abm_level;
6745                 ret = 0;
6746         }
6747
6748         return ret;
6749 }
6750
6751 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6752 {
6753         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6754
6755         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6756 }
6757
6758 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6759 {
6760         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6761         const struct dc_link *link = aconnector->dc_link;
6762         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6763         struct amdgpu_display_manager *dm = &adev->dm;
6764         int i;
6765
6766         /*
6767          * Call only if mst_mgr was initialized before, since it's not done
6768          * for all connector types.
6769          */
6770         if (aconnector->mst_mgr.dev)
6771                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6772
6773 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6774         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6775         for (i = 0; i < dm->num_of_edps; i++) {
6776                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6777                         backlight_device_unregister(dm->backlight_dev[i]);
6778                         dm->backlight_dev[i] = NULL;
6779                 }
6780         }
6781 #endif
6782
6783         if (aconnector->dc_em_sink)
6784                 dc_sink_release(aconnector->dc_em_sink);
6785         aconnector->dc_em_sink = NULL;
6786         if (aconnector->dc_sink)
6787                 dc_sink_release(aconnector->dc_sink);
6788         aconnector->dc_sink = NULL;
6789
6790         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6791         drm_connector_unregister(connector);
6792         drm_connector_cleanup(connector);
6793         if (aconnector->i2c) {
6794                 i2c_del_adapter(&aconnector->i2c->base);
6795                 kfree(aconnector->i2c);
6796         }
6797         kfree(aconnector->dm_dp_aux.aux.name);
6798
6799         kfree(connector);
6800 }
6801
6802 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6803 {
6804         struct dm_connector_state *state =
6805                 to_dm_connector_state(connector->state);
6806
6807         if (connector->state)
6808                 __drm_atomic_helper_connector_destroy_state(connector->state);
6809
6810         kfree(state);
6811
6812         state = kzalloc(sizeof(*state), GFP_KERNEL);
6813
6814         if (state) {
6815                 state->scaling = RMX_OFF;
6816                 state->underscan_enable = false;
6817                 state->underscan_hborder = 0;
6818                 state->underscan_vborder = 0;
6819                 state->base.max_requested_bpc = 8;
6820                 state->vcpi_slots = 0;
6821                 state->pbn = 0;
6822                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6823                         state->abm_level = amdgpu_dm_abm_level;
6824
6825                 __drm_atomic_helper_connector_reset(connector, &state->base);
6826         }
6827 }
6828
6829 struct drm_connector_state *
6830 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6831 {
6832         struct dm_connector_state *state =
6833                 to_dm_connector_state(connector->state);
6834
6835         struct dm_connector_state *new_state =
6836                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6837
6838         if (!new_state)
6839                 return NULL;
6840
6841         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6842
6843         new_state->freesync_capable = state->freesync_capable;
6844         new_state->abm_level = state->abm_level;
6845         new_state->scaling = state->scaling;
6846         new_state->underscan_enable = state->underscan_enable;
6847         new_state->underscan_hborder = state->underscan_hborder;
6848         new_state->underscan_vborder = state->underscan_vborder;
6849         new_state->vcpi_slots = state->vcpi_slots;
6850         new_state->pbn = state->pbn;
6851         return &new_state->base;
6852 }
6853
6854 static int
6855 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6856 {
6857         struct amdgpu_dm_connector *amdgpu_dm_connector =
6858                 to_amdgpu_dm_connector(connector);
6859         int r;
6860
6861         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6862             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6863                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6864                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6865                 if (r)
6866                         return r;
6867         }
6868
6869 #if defined(CONFIG_DEBUG_FS)
6870         connector_debugfs_init(amdgpu_dm_connector);
6871 #endif
6872
6873         return 0;
6874 }
6875
6876 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6877         .reset = amdgpu_dm_connector_funcs_reset,
6878         .detect = amdgpu_dm_connector_detect,
6879         .fill_modes = drm_helper_probe_single_connector_modes,
6880         .destroy = amdgpu_dm_connector_destroy,
6881         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6882         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6883         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6884         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6885         .late_register = amdgpu_dm_connector_late_register,
6886         .early_unregister = amdgpu_dm_connector_unregister
6887 };
6888
6889 static int get_modes(struct drm_connector *connector)
6890 {
6891         return amdgpu_dm_connector_get_modes(connector);
6892 }
6893
6894 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6895 {
6896         struct dc_sink_init_data init_params = {
6897                         .link = aconnector->dc_link,
6898                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6899         };
6900         struct edid *edid;
6901
6902         if (!aconnector->base.edid_blob_ptr) {
6903                 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6904                                 aconnector->base.name);
6905
6906                 aconnector->base.force = DRM_FORCE_OFF;
6907                 aconnector->base.override_edid = false;
6908                 return;
6909         }
6910
6911         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6912
6913         aconnector->edid = edid;
6914
6915         aconnector->dc_em_sink = dc_link_add_remote_sink(
6916                 aconnector->dc_link,
6917                 (uint8_t *)edid,
6918                 (edid->extensions + 1) * EDID_LENGTH,
6919                 &init_params);
6920
6921         if (aconnector->base.force == DRM_FORCE_ON) {
6922                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6923                                       aconnector->dc_link->local_sink :
6924                                       aconnector->dc_em_sink;
6925                 dc_sink_retain(aconnector->dc_sink);
6926         }
6927 }
6928
6929 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6930 {
6931         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6932
6933         /*
6934          * In case of headless boot with force on for a DP managed connector,
6935          * those settings have to be != 0 to get an initial modeset.
6936          */
6937         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6938                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6939                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6940         }
6941
6943         aconnector->base.override_edid = true;
6944         create_eml_sink(aconnector);
6945 }
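
/*
 * Example trigger (a sketch built on existing kernel options, not new
 * driver behaviour): the emulated-sink path above is reached when a
 * connector is forced on and an EDID is supplied on the kernel command
 * line, e.g. for a headless box:
 *
 *   video=DP-1:e drm.edid_firmware=DP-1:edid/1920x1080.bin
 */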
6946
6947 static struct dc_stream_state *
6948 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6949                                 const struct drm_display_mode *drm_mode,
6950                                 const struct dm_connector_state *dm_state,
6951                                 const struct dc_stream_state *old_stream)
6952 {
6953         struct drm_connector *connector = &aconnector->base;
6954         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6955         struct dc_stream_state *stream;
6956         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6957         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6958         enum dc_status dc_result = DC_OK;
6959
6960         do {
6961                 stream = create_stream_for_sink(aconnector, drm_mode,
6962                                                 dm_state, old_stream,
6963                                                 requested_bpc);
6964                 if (stream == NULL) {
6965                         DRM_ERROR("Failed to create stream for sink!\n");
6966                         break;
6967                 }
6968
6969                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6970
6971                 if (dc_result != DC_OK) {
6972                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6973                                       drm_mode->hdisplay,
6974                                       drm_mode->vdisplay,
6975                                       drm_mode->clock,
6976                                       dc_result,
6977                                       dc_status_to_str(dc_result));
6978
6979                         dc_stream_release(stream);
6980                         stream = NULL;
6981                         requested_bpc -= 2; /* lower bpc to retry validation */
6982                 }
6983
6984         } while (stream == NULL && requested_bpc >= 6);
6985
6986         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6987                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6988
6989                 aconnector->force_yuv420_output = true;
6990                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6991                                                 dm_state, old_stream);
6992                 aconnector->force_yuv420_output = false;
6993         }
6994
6995         return stream;
6996 }
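
/*
 * Worked example of the fallback ladder above (illustrative; the sink is
 * assumed, not taken from this file): a mode requested at 10 bpc is
 * validated at 30, then 24, then 18 bits per pixel. If the encoder still
 * rejects it (DC_FAIL_ENC_VALIDATE), the single recursive call retries the
 * whole ladder with YCbCr 4:2:0 forced, which needs only half the pixel
 * clock of RGB 4:4:4.
 */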
6997
6998 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6999                                    struct drm_display_mode *mode)
7000 {
7001         int result = MODE_ERROR;
7002         struct dc_sink *dc_sink;
7003         /* TODO: Unhardcode stream count */
7004         struct dc_stream_state *stream;
7005         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7006
7007         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7008                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7009                 return result;
7010
7011         /*
7012          * Only run this the first time mode_valid is called to initialize
7013          * EDID mgmt.
7014          */
7015         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7016                 !aconnector->dc_em_sink)
7017                 handle_edid_mgmt(aconnector);
7018
7019         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7020
7021         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7022                                 aconnector->base.force != DRM_FORCE_ON) {
7023                 DRM_ERROR("dc_sink is NULL!\n");
7024                 goto fail;
7025         }
7026
7027         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7028         if (stream) {
7029                 dc_stream_release(stream);
7030                 result = MODE_OK;
7031         }
7032
7033 fail:
7034         /* TODO: error handling */
7035         return result;
7036 }
7037
7038 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7039                                 struct dc_info_packet *out)
7040 {
7041         struct hdmi_drm_infoframe frame;
7042         unsigned char buf[30]; /* 26 + 4 */
7043         ssize_t len;
7044         int ret, i;
7045
7046         memset(out, 0, sizeof(*out));
7047
7048         if (!state->hdr_output_metadata)
7049                 return 0;
7050
7051         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7052         if (ret)
7053                 return ret;
7054
7055         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7056         if (len < 0)
7057                 return (int)len;
7058
7059         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7060         if (len != 30)
7061                 return -EINVAL;
7062
7063         /* Prepare the infopacket for DC. */
7064         switch (state->connector->connector_type) {
7065         case DRM_MODE_CONNECTOR_HDMIA:
7066                 out->hb0 = 0x87; /* type */
7067                 out->hb1 = 0x01; /* version */
7068                 out->hb2 = 0x1A; /* length */
7069                 out->sb[0] = buf[3]; /* checksum */
7070                 i = 1;
7071                 break;
7072
7073         case DRM_MODE_CONNECTOR_DisplayPort:
7074         case DRM_MODE_CONNECTOR_eDP:
7075                 out->hb0 = 0x00; /* sdp id, zero */
7076                 out->hb1 = 0x87; /* type */
7077                 out->hb2 = 0x1D; /* payload len - 1 */
7078                 out->hb3 = (0x13 << 2); /* sdp version */
7079                 out->sb[0] = 0x01; /* version */
7080                 out->sb[1] = 0x1A; /* length */
7081                 i = 2;
7082                 break;
7083
7084         default:
7085                 return -EINVAL;
7086         }
7087
7088         memcpy(&out->sb[i], &buf[4], 26);
7089         out->valid = true;
7090
7091         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7092                        sizeof(out->sb), false);
7093
7094         return 0;
7095 }
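
/*
 * Byte-layout sketch for the DisplayPort branch above (the values follow
 * directly from the assignments; shown only as a worked example):
 *
 *   hb0 = 0x00               // SDP id
 *   hb1 = 0x87               // packet type
 *   hb2 = 0x1D               // payload length - 1 (0x1D == 29)
 *   hb3 = 0x13 << 2 = 0x4C   // SDP version 0x13 in bits 7:2
 *
 * followed by sb[0] = version, sb[1] = length and the 26 bytes of static
 * metadata copied from the packed DRM infoframe.
 */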
7096
7097 static int
7098 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7099                                  struct drm_atomic_state *state)
7100 {
7101         struct drm_connector_state *new_con_state =
7102                 drm_atomic_get_new_connector_state(state, conn);
7103         struct drm_connector_state *old_con_state =
7104                 drm_atomic_get_old_connector_state(state, conn);
7105         struct drm_crtc *crtc = new_con_state->crtc;
7106         struct drm_crtc_state *new_crtc_state;
7107         int ret;
7108
7109         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7110
7111         if (!crtc)
7112                 return 0;
7113
7114         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7115                 struct dc_info_packet hdr_infopacket;
7116
7117                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7118                 if (ret)
7119                         return ret;
7120
7121                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7122                 if (IS_ERR(new_crtc_state))
7123                         return PTR_ERR(new_crtc_state);
7124
7125                 /*
7126                  * DC considers the stream backends changed if the
7127                  * static metadata changes. Forcing the modeset also
7128                  * gives a simple way for userspace to switch from
7129                  * 8bpc to 10bpc when setting the metadata to enter
7130                  * or exit HDR.
7131                  *
7132                  * Changing the static metadata after it's been
7133                  * set is permissible, however. So only force a
7134                  * modeset if we're entering or exiting HDR.
7135                  */
7136                 new_crtc_state->mode_changed =
7137                         !old_con_state->hdr_output_metadata ||
7138                         !new_con_state->hdr_output_metadata;
7139         }
7140
7141         return 0;
7142 }
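
/*
 * Userspace-side sketch (HDR_OUTPUT_METADATA is the standard DRM property
 * name; the atomic-commit plumbing is elided): entering HDR means attaching
 * a metadata blob where none was set, which the check above escalates to a
 * full modeset:
 *
 *   struct hdr_output_metadata meta = { ... };  // e.g. ST 2084 parameters
 *   drmModeCreatePropertyBlob(fd, &meta, sizeof(meta), &blob_id);
 *   // then set "HDR_OUTPUT_METADATA" = blob_id on the connector
 */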
7143
7144 static const struct drm_connector_helper_funcs
7145 amdgpu_dm_connector_helper_funcs = {
7146         /*
7147          * If hotplugging a second, bigger display in FB Con mode, bigger resolution
7148          * modes will be filtered by drm_mode_validate_size(), and those modes
7149          * are missing after the user starts lightdm. So we need to renew the modes
7150          * list in the get_modes callback, not just return the modes count.
7151          */
7152         .get_modes = get_modes,
7153         .mode_valid = amdgpu_dm_connector_mode_valid,
7154         .atomic_check = amdgpu_dm_connector_atomic_check,
7155 };
7156
7157 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7158 {
7159 }
7160
7161 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7162 {
7163         struct drm_atomic_state *state = new_crtc_state->state;
7164         struct drm_plane *plane;
7165         int num_active = 0;
7166
7167         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7168                 struct drm_plane_state *new_plane_state;
7169
7170                 /* Cursor planes are "fake". */
7171                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7172                         continue;
7173
7174                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7175
7176                 if (!new_plane_state) {
7177                         /*
7178                          * The plane is enabled on the CRTC and hasn't changed
7179                          * state. This means that it previously passed
7180                          * validation and is therefore enabled.
7181                          */
7182                         num_active += 1;
7183                         continue;
7184                 }
7185
7186                 /* We need a framebuffer to be considered enabled. */
7187                 num_active += (new_plane_state->fb != NULL);
7188         }
7189
7190         return num_active;
7191 }
7192
7193 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7194                                          struct drm_crtc_state *new_crtc_state)
7195 {
7196         struct dm_crtc_state *dm_new_crtc_state =
7197                 to_dm_crtc_state(new_crtc_state);
7198
7199         dm_new_crtc_state->active_planes = 0;
7200
7201         if (!dm_new_crtc_state->stream)
7202                 return;
7203
7204         dm_new_crtc_state->active_planes =
7205                 count_crtc_active_planes(new_crtc_state);
7206 }
7207
7208 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7209                                        struct drm_atomic_state *state)
7210 {
7211         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7212                                                                           crtc);
7213         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7214         struct dc *dc = adev->dm.dc;
7215         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7216         int ret = -EINVAL;
7217
7218         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7219
7220         dm_update_crtc_active_planes(crtc, crtc_state);
7221
7222         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7223                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7224                 return ret;
7225         }
7226
7227         /*
7228          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7229          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7230          * planes are disabled, which is not supported by the hardware. And there is legacy
7231          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7232          */
7233         if (crtc_state->enable &&
7234             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7235                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7236                 return -EINVAL;
7237         }
7238
7239         /* In some use cases, like reset, no stream is attached */
7240         if (!dm_crtc_state->stream)
7241                 return 0;
7242
7243         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7244                 return 0;
7245
7246         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7247         return ret;
7248 }
7249
7250 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7251                                       const struct drm_display_mode *mode,
7252                                       struct drm_display_mode *adjusted_mode)
7253 {
7254         return true;
7255 }
7256
7257 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7258         .disable = dm_crtc_helper_disable,
7259         .atomic_check = dm_crtc_helper_atomic_check,
7260         .mode_fixup = dm_crtc_helper_mode_fixup,
7261         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7262 };
7263
7264 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7265 {
7266 }
7268
7269 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7270 {
7271         switch (display_color_depth) {
7272         case COLOR_DEPTH_666:
7273                 return 6;
7274         case COLOR_DEPTH_888:
7275                 return 8;
7276         case COLOR_DEPTH_101010:
7277                 return 10;
7278         case COLOR_DEPTH_121212:
7279                 return 12;
7280         case COLOR_DEPTH_141414:
7281                 return 14;
7282         case COLOR_DEPTH_161616:
7283                 return 16;
7284         default:
7285                 break;
7286         }
7287         return 0;
7288 }
7289
7290 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7291                                           struct drm_crtc_state *crtc_state,
7292                                           struct drm_connector_state *conn_state)
7293 {
7294         struct drm_atomic_state *state = crtc_state->state;
7295         struct drm_connector *connector = conn_state->connector;
7296         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7297         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7298         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7299         struct drm_dp_mst_topology_mgr *mst_mgr;
7300         struct drm_dp_mst_port *mst_port;
7301         enum dc_color_depth color_depth;
7302         int clock, bpp = 0;
7303         bool is_y420 = false;
7304
7305         if (!aconnector->port || !aconnector->dc_sink)
7306                 return 0;
7307
7308         mst_port = aconnector->port;
7309         mst_mgr = &aconnector->mst_port->mst_mgr;
7310
7311         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7312                 return 0;
7313
7314         if (!state->duplicated) {
7315                 int max_bpc = conn_state->max_requested_bpc;
7316                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7317                                 aconnector->force_yuv420_output;
7318                 color_depth = convert_color_depth_from_display_info(connector,
7319                                                                     is_y420,
7320                                                                     max_bpc);
7321                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7322                 clock = adjusted_mode->clock;
7323                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7324         }
7325         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7326                                                                            mst_mgr,
7327                                                                            mst_port,
7328                                                                            dm_new_connector_state->pbn,
7329                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7330         if (dm_new_connector_state->vcpi_slots < 0) {
7331                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7332                 return dm_new_connector_state->vcpi_slots;
7333         }
7334         return 0;
7335 }
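
/*
 * Worked numbers for the PBN math above (a sketch; the link parameters are
 * assumed): a 1920x1080@60 stream at 8 bpc gives bpp = 8 * 3 = 24 and
 * clock = 148500 kHz, so drm_dp_calc_pbn_mode() returns roughly 530 PBN.
 * On a 4-lane HBR2 link the divider is about 40 PBN per MTP slot, so
 * drm_dp_atomic_find_vcpi_slots() then needs ceil(530 / 40) = 14 of the
 * 64 time slots.
 */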
7336
7337 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7338         .disable = dm_encoder_helper_disable,
7339         .atomic_check = dm_encoder_helper_atomic_check
7340 };
7341
7342 #if defined(CONFIG_DRM_AMD_DC_DCN)
7343 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7344                                             struct dc_state *dc_state,
7345                                             struct dsc_mst_fairness_vars *vars)
7346 {
7347         struct dc_stream_state *stream = NULL;
7348         struct drm_connector *connector;
7349         struct drm_connector_state *new_con_state;
7350         struct amdgpu_dm_connector *aconnector;
7351         struct dm_connector_state *dm_conn_state;
7352         int i, j;
7353         int vcpi, pbn_div, pbn, slot_num = 0;
7354
7355         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7356
7357                 aconnector = to_amdgpu_dm_connector(connector);
7358
7359                 if (!aconnector->port)
7360                         continue;
7361
7362                 if (!new_con_state || !new_con_state->crtc)
7363                         continue;
7364
7365                 dm_conn_state = to_dm_connector_state(new_con_state);
7366
7367                 for (j = 0; j < dc_state->stream_count; j++) {
7368                         stream = dc_state->streams[j];
7369                         if (!stream)
7370                                 continue;
7371
7372                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7373                                 break;
7374
7375                         stream = NULL;
7376                 }
7377
7378                 if (!stream)
7379                         continue;
7380
7381                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7382                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7383                 for (j = 0; j < dc_state->stream_count; j++) {
7384                         if (vars[j].aconnector == aconnector) {
7385                                 pbn = vars[j].pbn;
7386                                 break;
7387                         }
7388                 }
7389
7390                 if (j == dc_state->stream_count)
7391                         continue;
7392
7393                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7394
7395                 if (stream->timing.flags.DSC != 1) {
7396                         dm_conn_state->pbn = pbn;
7397                         dm_conn_state->vcpi_slots = slot_num;
7398
7399                         drm_dp_mst_atomic_enable_dsc(state,
7400                                                      aconnector->port,
7401                                                      dm_conn_state->pbn,
7402                                                      0,
7403                                                      false);
7404                         continue;
7405                 }
7406
7407                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7408                                                     aconnector->port,
7409                                                     pbn, pbn_div,
7410                                                     true);
7411                 if (vcpi < 0)
7412                         return vcpi;
7413
7414                 dm_conn_state->pbn = pbn;
7415                 dm_conn_state->vcpi_slots = vcpi;
7416         }
7417         return 0;
7418 }
7419 #endif
7420
7421 static void dm_drm_plane_reset(struct drm_plane *plane)
7422 {
7423         struct dm_plane_state *amdgpu_state = NULL;
7424
7425         if (plane->state)
7426                 plane->funcs->atomic_destroy_state(plane, plane->state);
7427
7428         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7429         WARN_ON(amdgpu_state == NULL);
7430
7431         if (amdgpu_state)
7432                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7433 }
7434
7435 static struct drm_plane_state *
7436 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7437 {
7438         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7439
7440         old_dm_plane_state = to_dm_plane_state(plane->state);
7441         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7442         if (!dm_plane_state)
7443                 return NULL;
7444
7445         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7446
7447         if (old_dm_plane_state->dc_state) {
7448                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7449                 dc_plane_state_retain(dm_plane_state->dc_state);
7450         }
7451
7452         return &dm_plane_state->base;
7453 }
7454
7455 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7456                                 struct drm_plane_state *state)
7457 {
7458         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7459
7460         if (dm_plane_state->dc_state)
7461                 dc_plane_state_release(dm_plane_state->dc_state);
7462
7463         drm_atomic_helper_plane_destroy_state(plane, state);
7464 }
7465
7466 static const struct drm_plane_funcs dm_plane_funcs = {
7467         .update_plane   = drm_atomic_helper_update_plane,
7468         .disable_plane  = drm_atomic_helper_disable_plane,
7469         .destroy        = drm_primary_helper_destroy,
7470         .reset = dm_drm_plane_reset,
7471         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7472         .atomic_destroy_state = dm_drm_plane_destroy_state,
7473         .format_mod_supported = dm_plane_format_mod_supported,
7474 };
7475
7476 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7477                                       struct drm_plane_state *new_state)
7478 {
7479         struct amdgpu_framebuffer *afb;
7480         struct drm_gem_object *obj;
7481         struct amdgpu_device *adev;
7482         struct amdgpu_bo *rbo;
7483         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7484         struct list_head list;
7485         struct ttm_validate_buffer tv;
7486         struct ww_acquire_ctx ticket;
7487         uint32_t domain;
7488         int r;
7489
7490         if (!new_state->fb) {
7491                 DRM_DEBUG_KMS("No FB bound\n");
7492                 return 0;
7493         }
7494
7495         afb = to_amdgpu_framebuffer(new_state->fb);
7496         obj = new_state->fb->obj[0];
7497         rbo = gem_to_amdgpu_bo(obj);
7498         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7499         INIT_LIST_HEAD(&list);
7500
7501         tv.bo = &rbo->tbo;
7502         tv.num_shared = 1;
7503         list_add(&tv.head, &list);
7504
7505         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7506         if (r) {
7507                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
7508                 return r;
7509         }
7510
7511         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7512                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7513         else
7514                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7515
7516         r = amdgpu_bo_pin(rbo, domain);
7517         if (unlikely(r != 0)) {
7518                 if (r != -ERESTARTSYS)
7519                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7520                 ttm_eu_backoff_reservation(&ticket, &list);
7521                 return r;
7522         }
7523
7524         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7525         if (unlikely(r != 0)) {
7526                 amdgpu_bo_unpin(rbo);
7527                 ttm_eu_backoff_reservation(&ticket, &list);
7528                 DRM_ERROR("%p bind failed\n", rbo);
7529                 return r;
7530         }
7531
7532         ttm_eu_backoff_reservation(&ticket, &list);
7533
7534         afb->address = amdgpu_bo_gpu_offset(rbo);
7535
7536         amdgpu_bo_ref(rbo);
7537
7538         /*
7539          * We don't do surface updates on planes that have been newly created,
7540          * but we also don't have the afb->address during atomic check.
7541          *
7542          * Fill in buffer attributes depending on the address here, but only on
7543          * newly created planes since they're not being used by DC yet and this
7544          * won't modify global state.
7545          */
7546         dm_plane_state_old = to_dm_plane_state(plane->state);
7547         dm_plane_state_new = to_dm_plane_state(new_state);
7548
7549         if (dm_plane_state_new->dc_state &&
7550             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7551                 struct dc_plane_state *plane_state =
7552                         dm_plane_state_new->dc_state;
7553                 bool force_disable_dcc = !plane_state->dcc.enable;
7554
7555                 fill_plane_buffer_attributes(
7556                         adev, afb, plane_state->format, plane_state->rotation,
7557                         afb->tiling_flags,
7558                         &plane_state->tiling_info, &plane_state->plane_size,
7559                         &plane_state->dcc, &plane_state->address,
7560                         afb->tmz_surface, force_disable_dcc);
7561         }
7562
7563         return 0;
7564 }
7565
7566 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7567                                        struct drm_plane_state *old_state)
7568 {
7569         struct amdgpu_bo *rbo;
7570         int r;
7571
7572         if (!old_state->fb)
7573                 return;
7574
7575         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7576         r = amdgpu_bo_reserve(rbo, false);
7577         if (unlikely(r)) {
7578                 DRM_ERROR("failed to reserve rbo before unpin\n");
7579                 return;
7580         }
7581
7582         amdgpu_bo_unpin(rbo);
7583         amdgpu_bo_unreserve(rbo);
7584         amdgpu_bo_unref(&rbo);
7585 }
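
/*
 * Pairing sketch (summarizing the two helpers above, nothing new):
 * prepare_fb reserves, pins and GART-binds the BO and takes a reference;
 * cleanup_fb undoes that once the FB is no longer scanned out:
 *
 *   prepare_fb:  ttm_eu_reserve_buffers() -> amdgpu_bo_pin() ->
 *                amdgpu_ttm_alloc_gart() -> amdgpu_bo_ref()
 *   cleanup_fb:  amdgpu_bo_reserve() -> amdgpu_bo_unpin() -> amdgpu_bo_unref()
 */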
7586
7587 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7588                                        struct drm_crtc_state *new_crtc_state)
7589 {
7590         struct drm_framebuffer *fb = state->fb;
7591         int min_downscale, max_upscale;
7592         int min_scale = 0;
7593         int max_scale = INT_MAX;
7594
7595         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7596         if (fb && state->crtc) {
7597                 /* Validate viewport to cover the case when only the position changes */
7598                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7599                         int viewport_width = state->crtc_w;
7600                         int viewport_height = state->crtc_h;
7601
7602                         if (state->crtc_x < 0)
7603                                 viewport_width += state->crtc_x;
7604                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7605                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7606
7607                         if (state->crtc_y < 0)
7608                                 viewport_height += state->crtc_y;
7609                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7610                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7611
7612                         if (viewport_width < 0 || viewport_height < 0) {
7613                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7614                                 return -EINVAL;
7615                         } else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* width x2 because of pipe-split */
7616                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
7617                                 return -EINVAL;
7618                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7619                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7620                                 return -EINVAL;
7621                         }
7622
7623                 }
7624
7625                 /* Get min/max allowed scaling factors from plane caps. */
7626                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7627                                              &min_downscale, &max_upscale);
7628                 /*
7629                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7630                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7631                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7632                  */
7633                 min_scale = (1000 << 16) / max_upscale;
7634                 max_scale = (1000 << 16) / min_downscale;
7635         }
7636
7637         return drm_atomic_helper_check_plane_state(
7638                 state, new_crtc_state, min_scale, max_scale, true, true);
7639 }
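
/*
 * Fixed-point example for the conversion above (the DC caps values are
 * assumed for illustration): with max_upscale = 16000 (16.0x in DC's
 * 1.0 == 1000 units) and min_downscale = 250 (0.25x):
 *
 *   min_scale = (1000 << 16) / 16000 =   4096   // 0.0625 in 16.16 (1/16)
 *   max_scale = (1000 << 16) /   250 = 262144   // 4.0 in 16.16
 *
 * matching drm's src/dst convention, where upscaling shrinks the ratio.
 */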
7640
7641 static int dm_plane_atomic_check(struct drm_plane *plane,
7642                                  struct drm_atomic_state *state)
7643 {
7644         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7645                                                                                  plane);
7646         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7647         struct dc *dc = adev->dm.dc;
7648         struct dm_plane_state *dm_plane_state;
7649         struct dc_scaling_info scaling_info;
7650         struct drm_crtc_state *new_crtc_state;
7651         int ret;
7652
7653         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7654
7655         dm_plane_state = to_dm_plane_state(new_plane_state);
7656
7657         if (!dm_plane_state->dc_state)
7658                 return 0;
7659
7660         new_crtc_state =
7661                 drm_atomic_get_new_crtc_state(state,
7662                                               new_plane_state->crtc);
7663         if (!new_crtc_state)
7664                 return -EINVAL;
7665
7666         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7667         if (ret)
7668                 return ret;
7669
7670         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7671         if (ret)
7672                 return ret;
7673
7674         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7675                 return 0;
7676
7677         return -EINVAL;
7678 }
7679
7680 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7681                                        struct drm_atomic_state *state)
7682 {
7683         /* Only support async updates on cursor planes. */
7684         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7685                 return -EINVAL;
7686
7687         return 0;
7688 }
7689
7690 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7691                                          struct drm_atomic_state *state)
7692 {
7693         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7694                                                                            plane);
7695         struct drm_plane_state *old_state =
7696                 drm_atomic_get_old_plane_state(state, plane);
7697
7698         trace_amdgpu_dm_atomic_update_cursor(new_state);
7699
7700         swap(plane->state->fb, new_state->fb);
7701
7702         plane->state->src_x = new_state->src_x;
7703         plane->state->src_y = new_state->src_y;
7704         plane->state->src_w = new_state->src_w;
7705         plane->state->src_h = new_state->src_h;
7706         plane->state->crtc_x = new_state->crtc_x;
7707         plane->state->crtc_y = new_state->crtc_y;
7708         plane->state->crtc_w = new_state->crtc_w;
7709         plane->state->crtc_h = new_state->crtc_h;
7710
7711         handle_cursor_update(plane, old_state);
7712 }
7713
7714 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7715         .prepare_fb = dm_plane_helper_prepare_fb,
7716         .cleanup_fb = dm_plane_helper_cleanup_fb,
7717         .atomic_check = dm_plane_atomic_check,
7718         .atomic_async_check = dm_plane_atomic_async_check,
7719         .atomic_async_update = dm_plane_atomic_async_update
7720 };
7721
7722 /*
7723  * TODO: these are currently initialized to rgb formats only.
7724  * For future use cases we should either initialize them dynamically based on
7725  * plane capabilities, or initialize this array to all formats, so the internal
7726  * drm check will succeed, and let DC implement the proper checks.
7727  */
7728 static const uint32_t rgb_formats[] = {
7729         DRM_FORMAT_XRGB8888,
7730         DRM_FORMAT_ARGB8888,
7731         DRM_FORMAT_RGBA8888,
7732         DRM_FORMAT_XRGB2101010,
7733         DRM_FORMAT_XBGR2101010,
7734         DRM_FORMAT_ARGB2101010,
7735         DRM_FORMAT_ABGR2101010,
7736         DRM_FORMAT_XRGB16161616,
7737         DRM_FORMAT_XBGR16161616,
7738         DRM_FORMAT_ARGB16161616,
7739         DRM_FORMAT_ABGR16161616,
7740         DRM_FORMAT_XBGR8888,
7741         DRM_FORMAT_ABGR8888,
7742         DRM_FORMAT_RGB565,
7743 };
7744
7745 static const uint32_t overlay_formats[] = {
7746         DRM_FORMAT_XRGB8888,
7747         DRM_FORMAT_ARGB8888,
7748         DRM_FORMAT_RGBA8888,
7749         DRM_FORMAT_XBGR8888,
7750         DRM_FORMAT_ABGR8888,
7751         DRM_FORMAT_RGB565
7752 };
7753
7754 static const u32 cursor_formats[] = {
7755         DRM_FORMAT_ARGB8888
7756 };
7757
7758 static int get_plane_formats(const struct drm_plane *plane,
7759                              const struct dc_plane_cap *plane_cap,
7760                              uint32_t *formats, int max_formats)
7761 {
7762         int i, num_formats = 0;
7763
7764         /*
7765          * TODO: Query support for each group of formats directly from
7766          * DC plane caps. This will require adding more formats to the
7767          * caps list.
7768          */
7769
7770         switch (plane->type) {
7771         case DRM_PLANE_TYPE_PRIMARY:
7772                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7773                         if (num_formats >= max_formats)
7774                                 break;
7775
7776                         formats[num_formats++] = rgb_formats[i];
7777                 }
7778
7779                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7780                         formats[num_formats++] = DRM_FORMAT_NV12;
7781                 if (plane_cap && plane_cap->pixel_format_support.p010)
7782                         formats[num_formats++] = DRM_FORMAT_P010;
7783                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7784                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7785                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7786                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7787                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7788                 }
7789                 break;
7790
7791         case DRM_PLANE_TYPE_OVERLAY:
7792                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7793                         if (num_formats >= max_formats)
7794                                 break;
7795
7796                         formats[num_formats++] = overlay_formats[i];
7797                 }
7798                 break;
7799
7800         case DRM_PLANE_TYPE_CURSOR:
7801                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7802                         if (num_formats >= max_formats)
7803                                 break;
7804
7805                         formats[num_formats++] = cursor_formats[i];
7806                 }
7807                 break;
7808         }
7809
7810         return num_formats;
7811 }
7812
7813 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7814                                 struct drm_plane *plane,
7815                                 unsigned long possible_crtcs,
7816                                 const struct dc_plane_cap *plane_cap)
7817 {
7818         uint32_t formats[32];
7819         int num_formats;
7820         int res = -EPERM;
7821         unsigned int supported_rotations;
7822         uint64_t *modifiers = NULL;
7823
7824         num_formats = get_plane_formats(plane, plane_cap, formats,
7825                                         ARRAY_SIZE(formats));
7826
7827         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7828         if (res)
7829                 return res;
7830
7831         if (modifiers == NULL)
7832                 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7833
7834         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7835                                        &dm_plane_funcs, formats, num_formats,
7836                                        modifiers, plane->type, NULL);
7837         kfree(modifiers);
7838         if (res)
7839                 return res;
7840
7841         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7842             plane_cap && plane_cap->per_pixel_alpha) {
7843                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7844                                           BIT(DRM_MODE_BLEND_PREMULTI);
7845
7846                 drm_plane_create_alpha_property(plane);
7847                 drm_plane_create_blend_mode_property(plane, blend_caps);
7848         }
7849
7850         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7851             plane_cap &&
7852             (plane_cap->pixel_format_support.nv12 ||
7853              plane_cap->pixel_format_support.p010)) {
7854                 /* This only affects YUV formats. */
7855                 drm_plane_create_color_properties(
7856                         plane,
7857                         BIT(DRM_COLOR_YCBCR_BT601) |
7858                         BIT(DRM_COLOR_YCBCR_BT709) |
7859                         BIT(DRM_COLOR_YCBCR_BT2020),
7860                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7861                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7862                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7863         }
7864
7865         supported_rotations =
7866                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7867                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7868
7869         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7870             plane->type != DRM_PLANE_TYPE_CURSOR)
7871                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7872                                                    supported_rotations);
7873
7874         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7875
7876         /* Create (reset) the plane state */
7877         if (plane->funcs->reset)
7878                 plane->funcs->reset(plane);
7879
7880         return 0;
7881 }
7882
7883 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7884                                struct drm_plane *plane,
7885                                uint32_t crtc_index)
7886 {
7887         struct amdgpu_crtc *acrtc = NULL;
7888         struct drm_plane *cursor_plane;
7889
7890         int res = -ENOMEM;
7891
7892         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7893         if (!cursor_plane)
7894                 goto fail;
7895
7896         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7897         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7898         if (res)
7899                 goto fail;
7898
7899         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7900         if (!acrtc)
7901                 goto fail;
7902
7903         res = drm_crtc_init_with_planes(
7904                         dm->ddev,
7905                         &acrtc->base,
7906                         plane,
7907                         cursor_plane,
7908                         &amdgpu_dm_crtc_funcs, NULL);
7909
7910         if (res)
7911                 goto fail;
7912
7913         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7914
7915         /* Create (reset) the plane state */
7916         if (acrtc->base.funcs->reset)
7917                 acrtc->base.funcs->reset(&acrtc->base);
7918
7919         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7920         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7921
7922         acrtc->crtc_id = crtc_index;
7923         acrtc->base.enabled = false;
7924         acrtc->otg_inst = -1;
7925
7926         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7927         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7928                                    true, MAX_COLOR_LUT_ENTRIES);
7929         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7930
7931         return 0;
7932
7933 fail:
7934         kfree(acrtc);
7935         kfree(cursor_plane);
7936         return res;
7937 }
7938
7939
7940 static int to_drm_connector_type(enum signal_type st)
7941 {
7942         switch (st) {
7943         case SIGNAL_TYPE_HDMI_TYPE_A:
7944                 return DRM_MODE_CONNECTOR_HDMIA;
7945         case SIGNAL_TYPE_EDP:
7946                 return DRM_MODE_CONNECTOR_eDP;
7947         case SIGNAL_TYPE_LVDS:
7948                 return DRM_MODE_CONNECTOR_LVDS;
7949         case SIGNAL_TYPE_RGB:
7950                 return DRM_MODE_CONNECTOR_VGA;
7951         case SIGNAL_TYPE_DISPLAY_PORT:
7952         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7953                 return DRM_MODE_CONNECTOR_DisplayPort;
7954         case SIGNAL_TYPE_DVI_DUAL_LINK:
7955         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7956                 return DRM_MODE_CONNECTOR_DVID;
7957         case SIGNAL_TYPE_VIRTUAL:
7958                 return DRM_MODE_CONNECTOR_VIRTUAL;
7959
7960         default:
7961                 return DRM_MODE_CONNECTOR_Unknown;
7962         }
7963 }
7964
7965 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7966 {
7967         struct drm_encoder *encoder;
7968
7969         /* There is only one encoder per connector */
7970         drm_connector_for_each_possible_encoder(connector, encoder)
7971                 return encoder;
7972
7973         return NULL;
7974 }
7975
7976 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7977 {
7978         struct drm_encoder *encoder;
7979         struct amdgpu_encoder *amdgpu_encoder;
7980
7981         encoder = amdgpu_dm_connector_to_encoder(connector);
7982
7983         if (encoder == NULL)
7984                 return;
7985
7986         amdgpu_encoder = to_amdgpu_encoder(encoder);
7987
7988         amdgpu_encoder->native_mode.clock = 0;
7989
7990         if (!list_empty(&connector->probed_modes)) {
7991                 struct drm_display_mode *preferred_mode = NULL;
7992
7993                 list_for_each_entry(preferred_mode,
7994                                     &connector->probed_modes,
7995                                     head) {
7996                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7997                                 amdgpu_encoder->native_mode = *preferred_mode;
7998
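                             /*
                              * Only the first entry is checked: drm_mode_sort()
                              * places preferred modes, highest resolution first,
                              * at the head of the probed list.
                              */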
7999                         break;
8000                 }
8001
8002         }
8003 }
8004
8005 static struct drm_display_mode *
8006 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8007                              char *name,
8008                              int hdisplay, int vdisplay)
8009 {
8010         struct drm_device *dev = encoder->dev;
8011         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8012         struct drm_display_mode *mode = NULL;
8013         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8014
8015         mode = drm_mode_duplicate(dev, native_mode);
8016
8017         if (mode == NULL)
8018                 return NULL;
8019
8020         mode->hdisplay = hdisplay;
8021         mode->vdisplay = vdisplay;
8022         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8023         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8024
8025         return mode;
8027 }
8028
8029 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8030                                                  struct drm_connector *connector)
8031 {
8032         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8033         struct drm_display_mode *mode = NULL;
8034         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8035         struct amdgpu_dm_connector *amdgpu_dm_connector =
8036                                 to_amdgpu_dm_connector(connector);
8037         int i;
8038         int n;
8039         struct mode_size {
8040                 char name[DRM_DISPLAY_MODE_LEN];
8041                 int w;
8042                 int h;
8043         } common_modes[] = {
8044                 {  "640x480",  640,  480},
8045                 {  "800x600",  800,  600},
8046                 { "1024x768", 1024,  768},
8047                 { "1280x720", 1280,  720},
8048                 { "1280x800", 1280,  800},
8049                 {"1280x1024", 1280, 1024},
8050                 { "1440x900", 1440,  900},
8051                 {"1680x1050", 1680, 1050},
8052                 {"1600x1200", 1600, 1200},
8053                 {"1920x1080", 1920, 1080},
8054                 {"1920x1200", 1920, 1200}
8055         };
8056
8057         n = ARRAY_SIZE(common_modes);
8058
8059         for (i = 0; i < n; i++) {
8060                 struct drm_display_mode *curmode = NULL;
8061                 bool mode_existed = false;
8062
8063                 if (common_modes[i].w > native_mode->hdisplay ||
8064                     common_modes[i].h > native_mode->vdisplay ||
8065                    (common_modes[i].w == native_mode->hdisplay &&
8066                     common_modes[i].h == native_mode->vdisplay))
8067                         continue;
8068
8069                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8070                         if (common_modes[i].w == curmode->hdisplay &&
8071                             common_modes[i].h == curmode->vdisplay) {
8072                                 mode_existed = true;
8073                                 break;
8074                         }
8075                 }
8076
8077                 if (mode_existed)
8078                         continue;
8079
8080                 mode = amdgpu_dm_create_common_mode(encoder,
8081                                 common_modes[i].name, common_modes[i].w,
8082                                 common_modes[i].h);
                     if (!mode)
                             continue;
8083                 drm_mode_probed_add(connector, mode);
8084                 amdgpu_dm_connector->num_modes++;
8085         }
8086 }
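     /*
      * Illustrative walk-through (panel values assumed, not from the driver):
      * for a native 1920x1200 panel whose EDID probed only 1920x1200, the
      * loop above adds the other ten common_modes[] entries, since they all
      * fit within 1920x1200 and are not already probed; the equality check
      * skips 1920x1200 itself so the native mode is not duplicated.
      */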
8087
8088 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8089 {
8090         struct drm_encoder *encoder;
8091         struct amdgpu_encoder *amdgpu_encoder;
8092         const struct drm_display_mode *native_mode;
8093
8094         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8095             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8096                 return;
8097
8098         encoder = amdgpu_dm_connector_to_encoder(connector);
8099         if (!encoder)
8100                 return;
8101
8102         amdgpu_encoder = to_amdgpu_encoder(encoder);
8103
8104         native_mode = &amdgpu_encoder->native_mode;
8105         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8106                 return;
8107
8108         drm_connector_set_panel_orientation_with_quirk(connector,
8109                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8110                                                        native_mode->hdisplay,
8111                                                        native_mode->vdisplay);
8112 }
8113
8114 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8115                                               struct edid *edid)
8116 {
8117         struct amdgpu_dm_connector *amdgpu_dm_connector =
8118                         to_amdgpu_dm_connector(connector);
8119
8120         if (edid) {
8121                 /* empty probed_modes */
8122                 INIT_LIST_HEAD(&connector->probed_modes);
8123                 amdgpu_dm_connector->num_modes =
8124                                 drm_add_edid_modes(connector, edid);
8125
8126                 /* Sort the probed modes before calling
8127                  * amdgpu_dm_get_native_mode(), since an EDID can have
8128                  * more than one preferred mode. Modes later in the
8129                  * probed list may have a higher, preferred resolution:
8130                  * for example, 3840x2160 in the base EDID preferred
8131                  * timing and 4096x2160 in a later DisplayID extension
8132                  * block.
8133                  */
8134                 drm_mode_sort(&connector->probed_modes);
8135                 amdgpu_dm_get_native_mode(connector);
8136
8137                 /* Freesync capabilities are reset by calling
8138                  * drm_add_edid_modes() and need to be
8139                  * restored here.
8140                  */
8141                 amdgpu_dm_update_freesync_caps(connector, edid);
8142
8143                 amdgpu_set_panel_orientation(connector);
8144         } else {
8145                 amdgpu_dm_connector->num_modes = 0;
8146         }
8147 }
8148
8149 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8150                               struct drm_display_mode *mode)
8151 {
8152         struct drm_display_mode *m;
8153
8154         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8155                 if (drm_mode_equal(m, mode))
8156                         return true;
8157         }
8158
8159         return false;
8160 }
8161
8162 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8163 {
8164         const struct drm_display_mode *m;
8165         struct drm_display_mode *new_mode;
8166         uint i;
8167         uint32_t new_modes_count = 0;
8168
8169         /* Standard FPS values
8170          *
8171          * 23.976       - TV/NTSC
8172          * 24           - Cinema
8173          * 25           - TV/PAL
8174          * 29.97        - TV/NTSC
8175          * 30           - TV/NTSC
8176          * 48           - Cinema HFR
8177          * 50           - TV/PAL
8178          * 60           - Commonly used
8179          * 48,72,96,120 - Multiples of 24
8180          */
8181         static const uint32_t common_rates[] = {
8182                 23976, 24000, 25000, 29970, 30000,
8183                 48000, 50000, 60000, 72000, 96000, 120000
8184         };
8185
8186         /*
8187          * Find the mode with the highest refresh rate at the same resolution
8188          * as the preferred mode. Some monitors report a preferred mode with a
8189          * lower refresh rate than the highest one they support.
8190          */
8191
8192         m = get_highest_refresh_rate_mode(aconnector, true);
8193         if (!m)
8194                 return 0;
8195
8196         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8197                 uint64_t target_vtotal, target_vtotal_diff;
8198                 uint64_t num, den;
8199
8200                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8201                         continue;
8202
8203                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8204                     common_rates[i] > aconnector->max_vfreq * 1000)
8205                         continue;
8206
8207                 num = (unsigned long long)m->clock * 1000 * 1000;
8208                 den = common_rates[i] * (unsigned long long)m->htotal;
8209                 target_vtotal = div_u64(num, den);
8210                 target_vtotal_diff = target_vtotal - m->vtotal;
8211
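                     /*
                      * Worked example (numbers assumed, not from the source):
                      * vrefresh_mHz = clock * 1000 * 1000 / (htotal * vtotal),
                      * so the vtotal that hits a target rate at fixed clock
                      * and htotal is clock * 1000 * 1000 / (rate_mHz * htotal).
                      * With clock = 297000 kHz, htotal = 2200 and a 60000 mHz
                      * target: 297000000000 / (60000 * 2200) = 2250 lines.
                      */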
8212                 /* Check for illegal modes */
8213                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8214                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8215                     m->vtotal + target_vtotal_diff < m->vsync_end)
8216                         continue;
8217
8218                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8219                 if (!new_mode)
8220                         goto out;
8221
8222                 new_mode->vtotal += (u16)target_vtotal_diff;
8223                 new_mode->vsync_start += (u16)target_vtotal_diff;
8224                 new_mode->vsync_end += (u16)target_vtotal_diff;
8225                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8226                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8227
8228                 if (!is_duplicate_mode(aconnector, new_mode)) {
8229                         drm_mode_probed_add(&aconnector->base, new_mode);
8230                         new_modes_count += 1;
8231                 } else {
8232                         drm_mode_destroy(aconnector->base.dev, new_mode);
                     }
8233         }
8234  out:
8235         return new_modes_count;
8236 }
8237
8238 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8239                                                    struct edid *edid)
8240 {
8241         struct amdgpu_dm_connector *amdgpu_dm_connector =
8242                 to_amdgpu_dm_connector(connector);
8243
8244         if (!(amdgpu_freesync_vid_mode && edid))
8245                 return;
8246
8247         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8248                 amdgpu_dm_connector->num_modes +=
8249                         add_fs_modes(amdgpu_dm_connector);
8250 }
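     /*
      * Illustrative thresholds (panel ranges assumed): a 48-144 Hz FreeSync
      * range (96 Hz span) passes the "> 10" check above and gets synthetic
      * fixed-rate modes from add_fs_modes(), while a nominal 58-62 Hz range
      * (4 Hz span) does not, so no extra modes are added for it.
      */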
8251
8252 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8253 {
8254         struct amdgpu_dm_connector *amdgpu_dm_connector =
8255                         to_amdgpu_dm_connector(connector);
8256         struct drm_encoder *encoder;
8257         struct edid *edid = amdgpu_dm_connector->edid;
8258
8259         encoder = amdgpu_dm_connector_to_encoder(connector);
8260
8261         if (!drm_edid_is_valid(edid)) {
8262                 amdgpu_dm_connector->num_modes =
8263                                 drm_add_modes_noedid(connector, 640, 480);
8264         } else {
8265                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8266                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8267                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8268         }
8269         amdgpu_dm_fbc_init(connector);
8270
8271         return amdgpu_dm_connector->num_modes;
8272 }
8273
8274 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8275                                      struct amdgpu_dm_connector *aconnector,
8276                                      int connector_type,
8277                                      struct dc_link *link,
8278                                      int link_index)
8279 {
8280         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8281
8282         /*
8283          * Some of the properties below require access to state, like bpc.
8284          * Allocate some default initial connector state with our reset helper.
8285          */
8286         if (aconnector->base.funcs->reset)
8287                 aconnector->base.funcs->reset(&aconnector->base);
8288
8289         aconnector->connector_id = link_index;
8290         aconnector->dc_link = link;
8291         aconnector->base.interlace_allowed = false;
8292         aconnector->base.doublescan_allowed = false;
8293         aconnector->base.stereo_allowed = false;
8294         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8295         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8296         aconnector->audio_inst = -1;
8297         mutex_init(&aconnector->hpd_lock);
8298
8299         /*
8300          * Configure HPD hot plug support: connector->polled defaults to 0,
8301          * which means HPD hot plug is not supported.
8302          */
8303         switch (connector_type) {
8304         case DRM_MODE_CONNECTOR_HDMIA:
8305                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8306                 aconnector->base.ycbcr_420_allowed =
8307                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8308                 break;
8309         case DRM_MODE_CONNECTOR_DisplayPort:
8310                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8311                 if (link->is_dig_mapping_flexible &&
8312                     link->dc->res_pool->funcs->link_encs_assign) {
8313                         link->link_enc =
8314                                 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8315                         if (!link->link_enc)
8316                                 link->link_enc =
8317                                         link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8318                 }
8319
8320                 if (link->link_enc)
8321                         aconnector->base.ycbcr_420_allowed =
8322                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
8323                 break;
8324         case DRM_MODE_CONNECTOR_DVID:
8325                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8326                 break;
8327         default:
8328                 break;
8329         }
8330
8331         drm_object_attach_property(&aconnector->base.base,
8332                                 dm->ddev->mode_config.scaling_mode_property,
8333                                 DRM_MODE_SCALE_NONE);
8334
8335         drm_object_attach_property(&aconnector->base.base,
8336                                 adev->mode_info.underscan_property,
8337                                 UNDERSCAN_OFF);
8338         drm_object_attach_property(&aconnector->base.base,
8339                                 adev->mode_info.underscan_hborder_property,
8340                                 0);
8341         drm_object_attach_property(&aconnector->base.base,
8342                                 adev->mode_info.underscan_vborder_property,
8343                                 0);
8344
8345         if (!aconnector->mst_port)
8346                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8347
8348         /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
8349         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8350         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8351
8352         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8353             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8354                 drm_object_attach_property(&aconnector->base.base,
8355                                 adev->mode_info.abm_level_property, 0);
8356         }
8357
8358         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8359             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8360             connector_type == DRM_MODE_CONNECTOR_eDP) {
8361                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8362
8363                 if (!aconnector->mst_port)
8364                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8365
8366 #ifdef CONFIG_DRM_AMD_DC_HDCP
8367                 if (adev->dm.hdcp_workqueue)
8368                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8369 #endif
8370         }
8371 }
8372
8373 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8374                               struct i2c_msg *msgs, int num)
8375 {
8376         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8377         struct ddc_service *ddc_service = i2c->ddc_service;
8378         struct i2c_command cmd;
8379         int i;
8380         int result = -EIO;
8381
8382         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8383
8384         if (!cmd.payloads)
8385                 return result;
8386
8387         cmd.number_of_payloads = num;
8388         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8389         cmd.speed = 100;
8390
8391         for (i = 0; i < num; i++) {
8392                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8393                 cmd.payloads[i].address = msgs[i].addr;
8394                 cmd.payloads[i].length = msgs[i].len;
8395                 cmd.payloads[i].data = msgs[i].buf;
8396         }
8397
8398         if (dc_submit_i2c(
8399                         ddc_service->ctx->dc,
8400                         ddc_service->ddc_pin->hw_info.ddc_channel,
8401                         &cmd))
8402                 result = num;
8403
8404         kfree(cmd.payloads);
8405         return result;
8406 }
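     /*
      * Illustrative only (addresses and lengths assumed): a standard EDID
      * block read arrives at amdgpu_dm_i2c_xfer() as two messages,
      *
      *   struct i2c_msg msgs[] = {
      *           { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &off },
      *           { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = blk },
      *   };
      *
      * which the loop above maps to one write payload and one read payload
      * submitted in a single dc_submit_i2c() transaction.
      */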
8407
8408 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8409 {
8410         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8411 }
8412
8413 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8414         .master_xfer = amdgpu_dm_i2c_xfer,
8415         .functionality = amdgpu_dm_i2c_func,
8416 };
8417
8418 static struct amdgpu_i2c_adapter *
8419 create_i2c(struct ddc_service *ddc_service,
8420            int link_index,
8421            int *res)
8422 {
8423         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8424         struct amdgpu_i2c_adapter *i2c;
8425
8426         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8427         if (!i2c)
8428                 return NULL;
8429         i2c->base.owner = THIS_MODULE;
8430         i2c->base.class = I2C_CLASS_DDC;
8431         i2c->base.dev.parent = &adev->pdev->dev;
8432         i2c->base.algo = &amdgpu_dm_i2c_algo;
8433         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8434         i2c_set_adapdata(&i2c->base, i2c);
8435         i2c->ddc_service = ddc_service;
8436         if (i2c->ddc_service->ddc_pin)
8437                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8438
8439         return i2c;
8440 }
8441
8442
8443 /*
8444  * Note: this function assumes that dc_link_detect() was called for the
8445  * dc_link which will be represented by this aconnector.
8446  */
8447 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8448                                     struct amdgpu_dm_connector *aconnector,
8449                                     uint32_t link_index,
8450                                     struct amdgpu_encoder *aencoder)
8451 {
8452         int res = 0;
8453         int connector_type;
8454         struct dc *dc = dm->dc;
8455         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8456         struct amdgpu_i2c_adapter *i2c;
8457
8458         link->priv = aconnector;
8459
8460         DRM_DEBUG_DRIVER("%s()\n", __func__);
8461
8462         i2c = create_i2c(link->ddc, link->link_index, &res);
8463         if (!i2c) {
8464                 DRM_ERROR("Failed to create i2c adapter data\n");
8465                 return -ENOMEM;
8466         }
8467
8468         aconnector->i2c = i2c;
8469         res = i2c_add_adapter(&i2c->base);
8470
8471         if (res) {
8472                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8473                 goto out_free;
8474         }
8475
8476         connector_type = to_drm_connector_type(link->connector_signal);
8477
8478         res = drm_connector_init_with_ddc(
8479                         dm->ddev,
8480                         &aconnector->base,
8481                         &amdgpu_dm_connector_funcs,
8482                         connector_type,
8483                         &i2c->base);
8484
8485         if (res) {
8486                 DRM_ERROR("connector_init failed\n");
8487                 aconnector->connector_id = -1;
8488                 goto out_free;
8489         }
8490
8491         drm_connector_helper_add(
8492                         &aconnector->base,
8493                         &amdgpu_dm_connector_helper_funcs);
8494
8495         amdgpu_dm_connector_init_helper(
8496                 dm,
8497                 aconnector,
8498                 connector_type,
8499                 link,
8500                 link_index);
8501
8502         drm_connector_attach_encoder(
8503                 &aconnector->base, &aencoder->base);
8504
8505         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8506                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8507                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8508
8509 out_free:
8510         if (res) {
8511                 kfree(i2c);
8512                 aconnector->i2c = NULL;
8513         }
8514         return res;
8515 }
8516
8517 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8518 {
8519         switch (adev->mode_info.num_crtc) {
8520         case 1:
8521                 return 0x1;
8522         case 2:
8523                 return 0x3;
8524         case 3:
8525                 return 0x7;
8526         case 4:
8527                 return 0xf;
8528         case 5:
8529                 return 0x1f;
8530         case 6:
8531         default:
8532                 return 0x3f;
8533         }
8534 }
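     /*
      * Equivalent mask computation, as a sketch (assumes 1 <= num_crtc <= 6;
      * the switch's default case also absorbs values outside that range):
      *
      *   return GENMASK(min(adev->mode_info.num_crtc, 6) - 1, 0);
      */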
8535
8536 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8537                                   struct amdgpu_encoder *aencoder,
8538                                   uint32_t link_index)
8539 {
8540         struct amdgpu_device *adev = drm_to_adev(dev);
8541
8542         int res = drm_encoder_init(dev,
8543                                    &aencoder->base,
8544                                    &amdgpu_dm_encoder_funcs,
8545                                    DRM_MODE_ENCODER_TMDS,
8546                                    NULL);
8547
8548         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8549
8550         if (!res)
8551                 aencoder->encoder_id = link_index;
8552         else
8553                 aencoder->encoder_id = -1;
8554
8555         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8556
8557         return res;
8558 }
8559
8560 static void manage_dm_interrupts(struct amdgpu_device *adev,
8561                                  struct amdgpu_crtc *acrtc,
8562                                  bool enable)
8563 {
8564         /*
8565          * We have no guarantee that the frontend index maps to the same
8566          * backend index - some even map to more than one.
8567          *
8568          * TODO: Use a different interrupt or check DC itself for the mapping.
8569          */
8570         int irq_type =
8571                 amdgpu_display_crtc_idx_to_irq_type(
8572                         adev,
8573                         acrtc->crtc_id);
8574
8575         if (enable) {
8576                 drm_crtc_vblank_on(&acrtc->base);
8577                 amdgpu_irq_get(
8578                         adev,
8579                         &adev->pageflip_irq,
8580                         irq_type);
8581 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8582                 amdgpu_irq_get(
8583                         adev,
8584                         &adev->vline0_irq,
8585                         irq_type);
8586 #endif
8587         } else {
8588 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8589                 amdgpu_irq_put(
8590                         adev,
8591                         &adev->vline0_irq,
8592                         irq_type);
8593 #endif
8594                 amdgpu_irq_put(
8595                         adev,
8596                         &adev->pageflip_irq,
8597                         irq_type);
8598                 drm_crtc_vblank_off(&acrtc->base);
8599         }
8600 }
8601
8602 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8603                                       struct amdgpu_crtc *acrtc)
8604 {
8605         int irq_type =
8606                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8607
8608         /*
8609          * This reads the current state for the IRQ and forces a reapply
8610          * of the setting to hardware.
8611          */
8612         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8613 }
8614
8615 static bool
8616 is_scaling_state_different(const struct dm_connector_state *dm_state,
8617                            const struct dm_connector_state *old_dm_state)
8618 {
8619         if (dm_state->scaling != old_dm_state->scaling)
8620                 return true;
8621         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8622                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8623                         return true;
8624         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8625                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8626                         return true;
8627         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8628                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8629                 return true;
8630         return false;
8631 }
8632
8633 #ifdef CONFIG_DRM_AMD_DC_HDCP
8634 static bool is_content_protection_different(struct drm_connector_state *state,
8635                                             const struct drm_connector_state *old_state,
8636                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8637 {
8638         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8639         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8640
8641         /* Handle: Type0/1 change */
8642         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8643             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8644                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8645                 return true;
8646         }
8647
8648         /* CP is being re-enabled, ignore this.
8649          *
8650          * Handles:     ENABLED -> DESIRED
8651          */
8652         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8653             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8654                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8655                 return false;
8656         }
8657
8658         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
8659          *
8660          * Handles:     UNDESIRED -> ENABLED
8661          */
8662         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8663             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8664                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8665
8666         /* Stream removed and re-enabled
8667          *
8668          * Can sometimes overlap with the HPD case,
8669          * thus set update_hdcp to false to avoid
8670          * setting HDCP multiple times.
8671          *
8672          * Handles:     DESIRED -> DESIRED (Special case)
8673          */
8674         if (!(old_state->crtc && old_state->crtc->enabled) &&
8675                 state->crtc && state->crtc->enabled &&
8676                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8677                 dm_con_state->update_hdcp = false;
8678                 return true;
8679         }
8680
8681         /* Hot-plug, headless s3, dpms
8682          *
8683          * Only start HDCP if the display is connected/enabled.
8684          * update_hdcp flag will be set to false until the next
8685          * HPD comes in.
8686          *
8687          * Handles:     DESIRED -> DESIRED (Special case)
8688          */
8689         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8690             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8691                 dm_con_state->update_hdcp = false;
8692                 return true;
8693         }
8694
8695         /*
8696          * Handles:     UNDESIRED -> UNDESIRED
8697          *              DESIRED -> DESIRED
8698          *              ENABLED -> ENABLED
8699          */
8700         if (old_state->content_protection == state->content_protection)
8701                 return false;
8702
8703         /*
8704          * Handles:     UNDESIRED -> DESIRED
8705          *              DESIRED -> UNDESIRED
8706          *              ENABLED -> UNDESIRED
8707          */
8708         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8709                 return true;
8710
8711         /*
8712          * Handles:     DESIRED -> ENABLED
8713          */
8714         return false;
8715 }
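     /*
      * Condensed view of the transitions handled above:
      *
      *   Type0/1 change (and not UNDESIRED)       -> force DESIRED, true
      *   ENABLED   -> DESIRED                     -> restore ENABLED, false
      *   UNDESIRED -> ENABLED (S3 resume)         -> downgrade to DESIRED
      *   DESIRED   -> DESIRED (re-enable or HPD)  -> true, clear update_hdcp
      *   old == new                               -> false
      *   anything  -> not ENABLED                 -> true
      *   DESIRED   -> ENABLED                     -> false
      */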
8716
8717 #endif
8718 static void remove_stream(struct amdgpu_device *adev,
8719                           struct amdgpu_crtc *acrtc,
8720                           struct dc_stream_state *stream)
8721 {
8722         /* this is the update mode case */
8723
8724         acrtc->otg_inst = -1;
8725         acrtc->enabled = false;
8726 }
8727
8728 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8729                                struct dc_cursor_position *position)
8730 {
8731         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8732         int x, y;
8733         int xorigin = 0, yorigin = 0;
8734
8735         if (!crtc || !plane->state->fb)
8736                 return 0;
8737
8738         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8739             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8740                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8741                           __func__,
8742                           plane->state->crtc_w,
8743                           plane->state->crtc_h);
8744                 return -EINVAL;
8745         }
8746
8747         x = plane->state->crtc_x;
8748         y = plane->state->crtc_y;
8749
8750         if (x <= -amdgpu_crtc->max_cursor_width ||
8751             y <= -amdgpu_crtc->max_cursor_height)
8752                 return 0;
8753
8754         if (x < 0) {
8755                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8756                 x = 0;
8757         }
8758         if (y < 0) {
8759                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8760                 y = 0;
8761         }
8762         position->enable = true;
8763         position->translate_by_source = true;
8764         position->x = x;
8765         position->y = y;
8766         position->x_hotspot = xorigin;
8767         position->y_hotspot = yorigin;
8768
8769         return 0;
8770 }
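     /*
      * Clamping example (values assumed): with max_cursor_width = 128, a
      * cursor at crtc_x = -16 yields xorigin = 16 and x = 0, shifting the
      * hotspot into the cursor surface instead of programming a negative
      * position; at x <= -128 the cursor is fully off screen and the
      * function returns early with position->enable still false.
      */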
8771
8772 static void handle_cursor_update(struct drm_plane *plane,
8773                                  struct drm_plane_state *old_plane_state)
8774 {
8775         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8776         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8777         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8778         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8779         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8780         uint64_t address = afb ? afb->address : 0;
8781         struct dc_cursor_position position = {0};
8782         struct dc_cursor_attributes attributes;
8783         int ret;
8784
8785         if (!plane->state->fb && !old_plane_state->fb)
8786                 return;
8787
8788         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8789                       __func__,
8790                       amdgpu_crtc->crtc_id,
8791                       plane->state->crtc_w,
8792                       plane->state->crtc_h);
8793
8794         ret = get_cursor_position(plane, crtc, &position);
8795         if (ret)
8796                 return;
8797
8798         if (!position.enable) {
8799                 /* turn off cursor */
8800                 if (crtc_state && crtc_state->stream) {
8801                         mutex_lock(&adev->dm.dc_lock);
8802                         dc_stream_set_cursor_position(crtc_state->stream,
8803                                                       &position);
8804                         mutex_unlock(&adev->dm.dc_lock);
8805                 }
8806                 return;
8807         }
8808
8809         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8810         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8811
8812         memset(&attributes, 0, sizeof(attributes));
8813         attributes.address.high_part = upper_32_bits(address);
8814         attributes.address.low_part  = lower_32_bits(address);
8815         attributes.width             = plane->state->crtc_w;
8816         attributes.height            = plane->state->crtc_h;
8817         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8818         attributes.rotation_angle    = 0;
8819         attributes.attribute_flags.value = 0;
8820
8821         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
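             /* Illustrative FB (assumed): a 64x64 XRGB8888 cursor has
              * pitches[0] = 256 bytes and cpp[0] = 4, so DC receives a
              * pitch of 64 pixels.
              */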
8822
8823         if (crtc_state->stream) {
8824                 mutex_lock(&adev->dm.dc_lock);
8825                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8826                                                          &attributes))
8827                         DRM_ERROR("DC failed to set cursor attributes\n");
8828
8829                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8830                                                    &position))
8831                         DRM_ERROR("DC failed to set cursor position\n");
8832                 mutex_unlock(&adev->dm.dc_lock);
8833         }
8834 }
8835
8836 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8837 {
8839         assert_spin_locked(&acrtc->base.dev->event_lock);
8840         WARN_ON(acrtc->event);
8841
8842         acrtc->event = acrtc->base.state->event;
8843
8844         /* Set the flip status */
8845         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8846
8847         /* Mark this event as consumed */
8848         acrtc->base.state->event = NULL;
8849
8850         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8851                      acrtc->crtc_id);
8852 }
8853
8854 static void update_freesync_state_on_stream(
8855         struct amdgpu_display_manager *dm,
8856         struct dm_crtc_state *new_crtc_state,
8857         struct dc_stream_state *new_stream,
8858         struct dc_plane_state *surface,
8859         u32 flip_timestamp_in_us)
8860 {
8861         struct mod_vrr_params vrr_params;
8862         struct dc_info_packet vrr_infopacket = {0};
8863         struct amdgpu_device *adev = dm->adev;
8864         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8865         unsigned long flags;
8866         bool pack_sdp_v1_3 = false;
8867
8868         if (!new_stream)
8869                 return;
8870
8871         /*
8872          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8873          * For now it's sufficient to just guard against these conditions.
8874          */
8875
8876         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8877                 return;
8878
8879         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8880         vrr_params = acrtc->dm_irq_params.vrr_params;
8881
8882         if (surface) {
8883                 mod_freesync_handle_preflip(
8884                         dm->freesync_module,
8885                         surface,
8886                         new_stream,
8887                         flip_timestamp_in_us,
8888                         &vrr_params);
8889
8890                 if (adev->family < AMDGPU_FAMILY_AI &&
8891                     amdgpu_dm_vrr_active(new_crtc_state)) {
8892                         mod_freesync_handle_v_update(dm->freesync_module,
8893                                                      new_stream, &vrr_params);
8894
8895                         /* Need to call this before the frame ends. */
8896                         dc_stream_adjust_vmin_vmax(dm->dc,
8897                                                    new_crtc_state->stream,
8898                                                    &vrr_params.adjust);
8899                 }
8900         }
8901
8902         mod_freesync_build_vrr_infopacket(
8903                 dm->freesync_module,
8904                 new_stream,
8905                 &vrr_params,
8906                 PACKET_TYPE_VRR,
8907                 TRANSFER_FUNC_UNKNOWN,
8908                 &vrr_infopacket,
8909                 pack_sdp_v1_3);
8910
8911         new_crtc_state->freesync_timing_changed |=
8912                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8913                         &vrr_params.adjust,
8914                         sizeof(vrr_params.adjust)) != 0);
8915
8916         new_crtc_state->freesync_vrr_info_changed |=
8917                 (memcmp(&new_crtc_state->vrr_infopacket,
8918                         &vrr_infopacket,
8919                         sizeof(vrr_infopacket)) != 0);
8920
8921         acrtc->dm_irq_params.vrr_params = vrr_params;
8922         new_crtc_state->vrr_infopacket = vrr_infopacket;
8923
8924         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8925         new_stream->vrr_infopacket = vrr_infopacket;
8926
8927         if (new_crtc_state->freesync_vrr_info_changed)
8928                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8929                               new_crtc_state->base.crtc->base.id,
8930                               (int)new_crtc_state->base.vrr_enabled,
8931                               (int)vrr_params.state);
8932
8933         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8934 }
8935
8936 static void update_stream_irq_parameters(
8937         struct amdgpu_display_manager *dm,
8938         struct dm_crtc_state *new_crtc_state)
8939 {
8940         struct dc_stream_state *new_stream = new_crtc_state->stream;
8941         struct mod_vrr_params vrr_params;
8942         struct mod_freesync_config config = new_crtc_state->freesync_config;
8943         struct amdgpu_device *adev = dm->adev;
8944         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8945         unsigned long flags;
8946
8947         if (!new_stream)
8948                 return;
8949
8950         /*
8951          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8952          * For now it's sufficient to just guard against these conditions.
8953          */
8954         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8955                 return;
8956
8957         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8958         vrr_params = acrtc->dm_irq_params.vrr_params;
8959
8960         if (new_crtc_state->vrr_supported &&
8961             config.min_refresh_in_uhz &&
8962             config.max_refresh_in_uhz) {
8963                 /*
8964                  * if freesync compatible mode was set, config.state will be set
8965                  * in atomic check
8966                  */
8967                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8968                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8969                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8970                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8971                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8972                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8973                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8974                 } else {
8975                         config.state = new_crtc_state->base.vrr_enabled ?
8976                                                      VRR_STATE_ACTIVE_VARIABLE :
8977                                                      VRR_STATE_INACTIVE;
8978                 }
8979         } else {
8980                 config.state = VRR_STATE_UNSUPPORTED;
8981         }
8982
8983         mod_freesync_build_vrr_params(dm->freesync_module,
8984                                       new_stream,
8985                                       &config, &vrr_params);
8986
8987         new_crtc_state->freesync_timing_changed |=
8988                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8989                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8990
8991         new_crtc_state->freesync_config = config;
8992         /* Copy state for access from DM IRQ handler */
8993         acrtc->dm_irq_params.freesync_config = config;
8994         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8995         acrtc->dm_irq_params.vrr_params = vrr_params;
8996         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8997 }
8998
8999 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9000                                             struct dm_crtc_state *new_state)
9001 {
9002         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9003         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9004
9005         if (!old_vrr_active && new_vrr_active) {
9006                 /* Transition VRR inactive -> active:
9007                  * While VRR is active, we must not disable the vblank irq, as a
9008                  * re-enable after a disable would compute bogus vblank/pflip
9009                  * timestamps if the disable happened inside the display front porch.
9010                  *
9011                  * We also need vupdate irq for the actual core vblank handling
9012                  * at end of vblank.
9013                  */
9014                 dm_set_vupdate_irq(new_state->base.crtc, true);
9015                 drm_crtc_vblank_get(new_state->base.crtc);
9016                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9017                                  __func__, new_state->base.crtc->base.id);
9018         } else if (old_vrr_active && !new_vrr_active) {
9019                 /* Transition VRR active -> inactive:
9020                  * Allow vblank irq disable again for fixed refresh rate.
9021                  */
9022                 dm_set_vupdate_irq(new_state->base.crtc, false);
9023                 drm_crtc_vblank_put(new_state->base.crtc);
9024                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9025                                  __func__, new_state->base.crtc->base.id);
9026         }
9027 }
9028
9029 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9030 {
9031         struct drm_plane *plane;
9032         struct drm_plane_state *old_plane_state;
9033         int i;
9034
9035         /*
9036          * TODO: Make this per-stream so we don't issue redundant updates for
9037          * commits with multiple streams.
9038          */
9039         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9040                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9041                         handle_cursor_update(plane, old_plane_state);
9042 }
9043
9044 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9045                                     struct dc_state *dc_state,
9046                                     struct drm_device *dev,
9047                                     struct amdgpu_display_manager *dm,
9048                                     struct drm_crtc *pcrtc,
9049                                     bool wait_for_vblank)
9050 {
9051         uint32_t i;
9052         uint64_t timestamp_ns;
9053         struct drm_plane *plane;
9054         struct drm_plane_state *old_plane_state, *new_plane_state;
9055         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9056         struct drm_crtc_state *new_pcrtc_state =
9057                         drm_atomic_get_new_crtc_state(state, pcrtc);
9058         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9059         struct dm_crtc_state *dm_old_crtc_state =
9060                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9061         int planes_count = 0, vpos, hpos;
9062         long r;
9063         unsigned long flags;
9064         struct amdgpu_bo *abo;
9065         uint32_t target_vblank, last_flip_vblank;
9066         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9067         bool pflip_present = false;
9068         struct {
9069                 struct dc_surface_update surface_updates[MAX_SURFACES];
9070                 struct dc_plane_info plane_infos[MAX_SURFACES];
9071                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9072                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9073                 struct dc_stream_update stream_update;
9074         } *bundle;
9075
9076         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9077
9078         if (!bundle) {
9079                 dm_error("Failed to allocate update bundle\n");
9080                 goto cleanup;
9081         }
9082
9083         /*
9084          * Disable the cursor first if we're disabling all the planes.
9085          * It'll remain on the screen after the planes are re-enabled
9086          * if we don't.
9087          */
9088         if (acrtc_state->active_planes == 0)
9089                 amdgpu_dm_commit_cursors(state);
9090
9091         /* update planes when needed */
9092         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9093                 struct drm_crtc *crtc = new_plane_state->crtc;
9094                 struct drm_crtc_state *new_crtc_state;
9095                 struct drm_framebuffer *fb = new_plane_state->fb;
9096                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9097                 bool plane_needs_flip;
9098                 struct dc_plane_state *dc_plane;
9099                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9100
9101                 /* Cursor plane is handled after stream updates */
9102                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9103                         continue;
9104
9105                 if (!fb || !crtc || pcrtc != crtc)
9106                         continue;
9107
9108                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9109                 if (!new_crtc_state->active)
9110                         continue;
9111
9112                 dc_plane = dm_new_plane_state->dc_state;
9113
9114                 bundle->surface_updates[planes_count].surface = dc_plane;
9115                 if (new_pcrtc_state->color_mgmt_changed) {
9116                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9117                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9118                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9119                 }
9120
9121                 fill_dc_scaling_info(dm->adev, new_plane_state,
9122                                      &bundle->scaling_infos[planes_count]);
9123
9124                 bundle->surface_updates[planes_count].scaling_info =
9125                         &bundle->scaling_infos[planes_count];
9126
9127                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9128
9129                 pflip_present = pflip_present || plane_needs_flip;
9130
9131                 if (!plane_needs_flip) {
9132                         planes_count += 1;
9133                         continue;
9134                 }
9135
9136                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9137
9138                 /*
9139                  * Wait for all fences on this FB. Do limited wait to avoid
9140                  * Wait for all fences on this FB. Use a bounded wait to avoid
9141                  * a deadlock during GPU reset, when this fence would never
9142                  * signal while we hold the reservation lock for the BO.
9143                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9144                                           msecs_to_jiffies(5000));
9145                 if (unlikely(r <= 0))
9146                         DRM_ERROR("Waiting for fences timed out!");
9147
9148                 fill_dc_plane_info_and_addr(
9149                         dm->adev, new_plane_state,
9150                         afb->tiling_flags,
9151                         &bundle->plane_infos[planes_count],
9152                         &bundle->flip_addrs[planes_count].address,
9153                         afb->tmz_surface, false);
9154
9155                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9156                                  new_plane_state->plane->index,
9157                                  bundle->plane_infos[planes_count].dcc.enable);
9158
9159                 bundle->surface_updates[planes_count].plane_info =
9160                         &bundle->plane_infos[planes_count];
9161
9162                 /*
9163                  * Only allow immediate flips for fast updates that don't
9164                  * change FB pitch, DCC state, rotation or mirroring.
9165                  */
9166                 bundle->flip_addrs[planes_count].flip_immediate =
9167                         crtc->state->async_flip &&
9168                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9169
9170                 timestamp_ns = ktime_get_ns();
9171                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9172                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9173                 bundle->surface_updates[planes_count].surface = dc_plane;
9174
9175                 if (!bundle->surface_updates[planes_count].surface) {
9176                         DRM_ERROR("No surface for CRTC: id=%d\n",
9177                                         acrtc_attach->crtc_id);
9178                         continue;
9179                 }
9180
9181                 if (plane == pcrtc->primary)
9182                         update_freesync_state_on_stream(
9183                                 dm,
9184                                 acrtc_state,
9185                                 acrtc_state->stream,
9186                                 dc_plane,
9187                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9188
9189                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9190                                  __func__,
9191                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9192                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9193
9194                 planes_count += 1;
9195
9196         }
9197
9198         if (pflip_present) {
9199                 if (!vrr_active) {
9200                         /* Use old throttling in non-vrr fixed refresh rate mode
9201                          * to keep flip scheduling based on target vblank counts
9202                          * working in a backwards compatible way, e.g., for
9203                          * clients using the GLX_OML_sync_control extension or
9204                          * DRI3/Present extension with defined target_msc.
9205                          */
9206                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9207                 } else {
9209                         /* For variable refresh rate mode only:
9210                          * Get vblank of last completed flip to avoid > 1 vrr
9211                          * flips per video frame by use of throttling, but allow
9212                          * flip programming anywhere in the possibly large
9213                          * variable vrr vblank interval for fine-grained flip
9214                          * timing control and more opportunity to avoid stutter
9215                          * on late submission of flips.
9216                          */
9217                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9218                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9219                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9220                 }
9221
9222                 target_vblank = last_flip_vblank + wait_for_vblank;
9223
9224                 /*
9225                  * Wait until we're out of the vertical blank period before the one
9226                  * targeted by the flip
9227                  */
9228                 while ((acrtc_attach->enabled &&
9229                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9230                                                             0, &vpos, &hpos, NULL,
9231                                                             NULL, &pcrtc->hwmode)
9232                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9233                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9234                         (int)(target_vblank -
9235                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9236                         usleep_range(1000, 1100);
9237                 }
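                     /*
                      * Example (counter values assumed): with wait_for_vblank
                      * true and last_flip_vblank = 1000, target_vblank = 1001;
                      * the loop above polls in ~1 ms steps while scanout is
                      * still inside a vblank and the counter has not yet
                      * reached 1001, throttling to at most one flip per
                      * refresh cycle in fixed refresh mode.
                      */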
9238
9239                 /*
9240                  * Prepare the flip event for the pageflip interrupt to handle.
9241                  *
9242                  * This only works in the case where we've already turned on the
9243                  * appropriate hardware blocks (e.g. HUBP), so in the transition
9244                  * from 0 -> n planes we have to skip a hardware-generated event
9245                  * and rely on sending it from software.
9246                  */
9247                 if (acrtc_attach->base.state->event &&
9248                     acrtc_state->active_planes > 0 &&
9249                     !acrtc_state->force_dpms_off) {
9250                         drm_crtc_vblank_get(pcrtc);
9251
9252                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9253
9254                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9255                         prepare_flip_isr(acrtc_attach);
9256
9257                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9258                 }
9259
9260                 if (acrtc_state->stream) {
9261                         if (acrtc_state->freesync_vrr_info_changed)
9262                                 bundle->stream_update.vrr_infopacket =
9263                                         &acrtc_state->stream->vrr_infopacket;
9264                 }
9265         }
9266
9267         /* Update the planes if changed or disable if we don't have any. */
9268         if ((planes_count || acrtc_state->active_planes == 0) &&
9269                 acrtc_state->stream) {
9270 #if defined(CONFIG_DRM_AMD_DC_DCN)
9271                 /*
9272                  * If PSR or idle optimizations are enabled then flush out
9273                  * any pending work before hardware programming.
9274                  */
9275                 if (dm->vblank_control_workqueue)
9276                         flush_workqueue(dm->vblank_control_workqueue);
9277 #endif
9278
9279                 bundle->stream_update.stream = acrtc_state->stream;
9280                 if (new_pcrtc_state->mode_changed) {
9281                         bundle->stream_update.src = acrtc_state->stream->src;
9282                         bundle->stream_update.dst = acrtc_state->stream->dst;
9283                 }
9284
9285                 if (new_pcrtc_state->color_mgmt_changed) {
9286                         /*
9287                          * TODO: This isn't fully correct since we've actually
9288                          * already modified the stream in place.
9289                          */
9290                         bundle->stream_update.gamut_remap =
9291                                 &acrtc_state->stream->gamut_remap_matrix;
9292                         bundle->stream_update.output_csc_transform =
9293                                 &acrtc_state->stream->csc_color_matrix;
9294                         bundle->stream_update.out_transfer_func =
9295                                 acrtc_state->stream->out_transfer_func;
9296                 }
9297
9298                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9299                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9300                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9301
9302                 /*
9303                  * If FreeSync state on the stream has changed then we need to
9304                  * re-adjust the min/max bounds now that DC doesn't handle this
9305                  * as part of commit.
9306                  */
9307                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9308                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9309                         dc_stream_adjust_vmin_vmax(
9310                                 dm->dc, acrtc_state->stream,
9311                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9312                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9313                 }
9314                 mutex_lock(&dm->dc_lock);
9315                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9316                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9317                         amdgpu_dm_psr_disable(acrtc_state->stream);
9318
9319                 dc_commit_updates_for_stream(dm->dc,
9320                                                      bundle->surface_updates,
9321                                                      planes_count,
9322                                                      acrtc_state->stream,
9323                                                      &bundle->stream_update,
9324                                                      dc_state);
9325
9326                 /*
9327                  * Enable or disable the interrupts on the backend.
9328                  *
9329                  * Most pipes are put into power gating when unused.
9330                  *
9331                  * When power gating is enabled on a pipe we lose the
9332                  * interrupt enablement state when power gating is disabled.
9333                  *
9334                  * So we need to update the IRQ control state in hardware
9335                  * whenever the pipe turns on (since it could be previously
9336                  * power gated) or off (since some pipes can't be power gated
9337                  * on some ASICs).
9338                  */
9339                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9340                         dm_update_pflip_irq_state(drm_to_adev(dev),
9341                                                   acrtc_attach);
9342
9343                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9344                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9345                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9346                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9347
9348                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9349                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9350                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9351                         struct amdgpu_dm_connector *aconn =
9352                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9353
9354                         if (aconn->psr_skip_count > 0)
9355                                 aconn->psr_skip_count--;
9356
9357                         /* Allow PSR when skip count is 0. */
9358                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9359                 } else {
9360                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9361                 }
9362
9363                 mutex_unlock(&dm->dc_lock);
9364         }
9365
9366         /*
9367          * Update cursor state *after* programming all the planes.
9368          * This avoids redundant programming in the case where we're going
9369          * to be disabling a single plane - those pipes are being disabled.
9370          */
9371         if (acrtc_state->active_planes)
9372                 amdgpu_dm_commit_cursors(state);
9373
9374 cleanup:
9375         kfree(bundle);
9376 }
9377
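/*
 * Notify the registered audio component about audio endpoint changes: first
 * tear down the ELD notification for connectors whose CRTC changed or is
 * being shut down, then publish the new audio instance for every connector
 * that went through a modeset and has an active stream.
 */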
9378 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9379                                    struct drm_atomic_state *state)
9380 {
9381         struct amdgpu_device *adev = drm_to_adev(dev);
9382         struct amdgpu_dm_connector *aconnector;
9383         struct drm_connector *connector;
9384         struct drm_connector_state *old_con_state, *new_con_state;
9385         struct drm_crtc_state *new_crtc_state;
9386         struct dm_crtc_state *new_dm_crtc_state;
9387         const struct dc_stream_status *status;
9388         int i, inst;
9389
9390         /* Notify audio device removals. */
9391         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9392                 if (old_con_state->crtc != new_con_state->crtc) {
9393                         /* CRTC changes require notification. */
9394                         goto notify;
9395                 }
9396
9397                 if (!new_con_state->crtc)
9398                         continue;
9399
9400                 new_crtc_state = drm_atomic_get_new_crtc_state(
9401                         state, new_con_state->crtc);
9402
9403                 if (!new_crtc_state)
9404                         continue;
9405
9406                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9407                         continue;
9408
9409         notify:
9410                 aconnector = to_amdgpu_dm_connector(connector);
9411
9412                 mutex_lock(&adev->dm.audio_lock);
9413                 inst = aconnector->audio_inst;
9414                 aconnector->audio_inst = -1;
9415                 mutex_unlock(&adev->dm.audio_lock);
9416
9417                 amdgpu_dm_audio_eld_notify(adev, inst);
9418         }
9419
9420         /* Notify audio device additions. */
9421         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9422                 if (!new_con_state->crtc)
9423                         continue;
9424
9425                 new_crtc_state = drm_atomic_get_new_crtc_state(
9426                         state, new_con_state->crtc);
9427
9428                 if (!new_crtc_state)
9429                         continue;
9430
9431                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9432                         continue;
9433
9434                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9435                 if (!new_dm_crtc_state->stream)
9436                         continue;
9437
9438                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9439                 if (!status)
9440                         continue;
9441
9442                 aconnector = to_amdgpu_dm_connector(connector);
9443
9444                 mutex_lock(&adev->dm.audio_lock);
9445                 inst = status->audio_inst;
9446                 aconnector->audio_inst = inst;
9447                 mutex_unlock(&adev->dm.audio_lock);
9448
9449                 amdgpu_dm_audio_eld_notify(adev, inst);
9450         }
9451 }
9452
9453 /*
9454  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9455  * @crtc_state: the DRM CRTC state
9456  * @stream_state: the DC stream state.
9457  *
9458  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9459  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9460  */
9461 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9462                                                 struct dc_stream_state *stream_state)
9463 {
9464         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9465 }
9466
9467 /**
9468  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9469  * @state: The atomic state to commit
9470  *
9471  * This will tell DC to commit the constructed DC state from atomic_check,
9472  * programming the hardware. Any failure here implies a hardware failure, since
9473  * atomic check should have filtered anything non-kosher.
9474  */
9475 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9476 {
9477         struct drm_device *dev = state->dev;
9478         struct amdgpu_device *adev = drm_to_adev(dev);
9479         struct amdgpu_display_manager *dm = &adev->dm;
9480         struct dm_atomic_state *dm_state;
9481         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9482         uint32_t i, j;
9483         struct drm_crtc *crtc;
9484         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9485         unsigned long flags;
9486         bool wait_for_vblank = true;
9487         struct drm_connector *connector;
9488         struct drm_connector_state *old_con_state, *new_con_state;
9489         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9490         int crtc_disable_count = 0;
9491         bool mode_set_reset_required = false;
9492
9493         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9494
9495         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9496
9497         dm_state = dm_atomic_get_new_state(state);
9498         if (dm_state && dm_state->context) {
9499                 dc_state = dm_state->context;
9500         } else {
9501                 /* No state changes, retain current state. */
9502                 dc_state_temp = dc_create_state(dm->dc);
9503                 ASSERT(dc_state_temp);
9504                 dc_state = dc_state_temp;
9505                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9506         }
9507
9508         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9509                                        new_crtc_state, i) {
9510                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9511
9512                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9513
9514                 if (old_crtc_state->active &&
9515                     (!new_crtc_state->active ||
9516                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9517                         manage_dm_interrupts(adev, acrtc, false);
9518                         dc_stream_release(dm_old_crtc_state->stream);
9519                 }
9520         }
9521
9522         drm_atomic_helper_calc_timestamping_constants(state);
9523
9524         /* update changed items */
9525         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9526                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9527
9528                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9529                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9530
9531                 DRM_DEBUG_ATOMIC(
9532                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9533                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9534                         "connectors_changed:%d\n",
9535                         acrtc->crtc_id,
9536                         new_crtc_state->enable,
9537                         new_crtc_state->active,
9538                         new_crtc_state->planes_changed,
9539                         new_crtc_state->mode_changed,
9540                         new_crtc_state->active_changed,
9541                         new_crtc_state->connectors_changed);
9542
9543                 /* Disable cursor if disabling crtc */
9544                 if (old_crtc_state->active && !new_crtc_state->active) {
9545                         struct dc_cursor_position position;
9546
9547                         memset(&position, 0, sizeof(position));
9548                         mutex_lock(&dm->dc_lock);
9549                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9550                         mutex_unlock(&dm->dc_lock);
9551                 }
9552
9553                 /* Copy all transient state flags into dc state */
9554                 if (dm_new_crtc_state->stream) {
9555                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9556                                                             dm_new_crtc_state->stream);
9557                 }
9558
9559                 /* Handles the headless hotplug case, updating new_state
9560                  * and aconnector as needed.
9561                  */
9562
9563                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9564
9565                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9566
9567                         if (!dm_new_crtc_state->stream) {
9568                                 /*
9569                                  * This could happen because of issues with
9570                                  * userspace notification delivery: userspace
9571                                  * tries to set a mode on a display which is
9572                                  * in fact disconnected, so dc_sink is NULL
9573                                  * on the aconnector. We expect a mode reset
9574                                  * to come soon in that case.
9575                                  *
9576                                  * This can also happen when an unplug occurs
9577                                  * while the resume sequence is still running.
9578                                  *
9579                                  * In both cases we want to pretend we still
9580                                  * have a sink, to keep the pipe running so
9581                                  * that hw state stays consistent with sw state.
9582                                  */
9583                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9584                                                 __func__, acrtc->base.base.id);
9585                                 continue;
9586                         }
9587
9588                         if (dm_old_crtc_state->stream)
9589                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9590
9591                         pm_runtime_get_noresume(dev->dev);
9592
9593                         acrtc->enabled = true;
9594                         acrtc->hw_mode = new_crtc_state->mode;
9595                         crtc->hwmode = new_crtc_state->mode;
9596                         mode_set_reset_required = true;
9597                 } else if (modereset_required(new_crtc_state)) {
9598                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9599                         /* i.e. reset mode */
9600                         if (dm_old_crtc_state->stream)
9601                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9602
9603                         mode_set_reset_required = true;
9604                 }
9605         } /* for_each_crtc_in_state() */
9606
9607         if (dc_state) {
9608                 /* If there is a mode set or reset, disable eDP PSR */
9609                 if (mode_set_reset_required) {
9610 #if defined(CONFIG_DRM_AMD_DC_DCN)
9611                         if (dm->vblank_control_workqueue)
9612                                 flush_workqueue(dm->vblank_control_workqueue);
9613 #endif
9614                         amdgpu_dm_psr_disable_all(dm);
9615                 }
9616
9617                 dm_enable_per_frame_crtc_master_sync(dc_state);
9618                 mutex_lock(&dm->dc_lock);
9619                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9620 #if defined(CONFIG_DRM_AMD_DC_DCN)
9621                 /* Allow idle optimization when vblank count is 0 for display off */
9622                 if (dm->active_vblank_irq_count == 0)
9623                         dc_allow_idle_optimizations(dm->dc, true);
9624 #endif
9625                 mutex_unlock(&dm->dc_lock);
9626         }
9627
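        /*
         * Cache each CRTC's OTG (output timing generator) instance, now that
         * DC has mapped the committed streams onto hardware pipes.
         */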
9628         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9629                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9630
9631                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9632
9633                 if (dm_new_crtc_state->stream != NULL) {
9634                         const struct dc_stream_status *status =
9635                                         dc_stream_get_status(dm_new_crtc_state->stream);
9636
9637                         if (!status)
9638                                 status = dc_stream_get_status_from_state(dc_state,
9639                                                                          dm_new_crtc_state->stream);
9640                         if (!status)
9641                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9642                         else
9643                                 acrtc->otg_inst = status->primary_otg_inst;
9644                 }
9645         }
9646 #ifdef CONFIG_DRM_AMD_DC_HDCP
9647         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9648                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9649                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9650                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9651
9652                 new_crtc_state = NULL;
9653
9654                 if (acrtc)
9655                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9656
9657                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9658
9659                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9660                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9661                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9662                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9663                         dm_new_con_state->update_hdcp = true;
9664                         continue;
9665                 }
9666
9667                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9668                         hdcp_update_display(
9669                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9670                                 new_con_state->hdcp_content_type,
9671                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9672         }
9673 #endif
9674
9675         /* Handle connector state changes */
9676         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9677                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9678                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9679                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9680                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9681                 struct dc_stream_update stream_update;
9682                 struct dc_info_packet hdr_packet;
9683                 struct dc_stream_status *status = NULL;
9684                 bool abm_changed, hdr_changed, scaling_changed;
9685
9686                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9687                 memset(&stream_update, 0, sizeof(stream_update));
9688
9689                 if (acrtc) {
9690                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9691                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9692                 }
9693
9694                 /* Skip any modesets/resets */
9695                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9696                         continue;
9697
9698                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9699                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9700
9701                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9702                                                              dm_old_con_state);
9703
9704                 abm_changed = dm_new_crtc_state->abm_level !=
9705                               dm_old_crtc_state->abm_level;
9706
9707                 hdr_changed =
9708                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9709
9710                 if (!scaling_changed && !abm_changed && !hdr_changed)
9711                         continue;
9712
9713                 stream_update.stream = dm_new_crtc_state->stream;
9714                 if (scaling_changed) {
9715                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9716                                         dm_new_con_state, dm_new_crtc_state->stream);
9717
9718                         stream_update.src = dm_new_crtc_state->stream->src;
9719                         stream_update.dst = dm_new_crtc_state->stream->dst;
9720                 }
9721
9722                 if (abm_changed) {
9723                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9724
9725                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9726                 }
9727
9728                 if (hdr_changed) {
9729                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9730                         stream_update.hdr_static_metadata = &hdr_packet;
9731                 }
9732
9733                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9734
9735                 if (WARN_ON(!status))
9736                         continue;
9737
9738                 WARN_ON(!status->plane_count);
9739
9740                 /*
9741                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9742                  * Here we create an empty update on each plane.
9743                  * To fix this, DC should permit updating only stream properties.
9744                  */
9745                 for (j = 0; j < status->plane_count; j++)
9746                         dummy_updates[j].surface = status->plane_states[0];
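                /*
                 * Note: every dummy update references the first plane state;
                 * the surfaces are not actually changed, the updates merely
                 * satisfy the requirement described above.
                 */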
9747
9748
9749                 mutex_lock(&dm->dc_lock);
9750                 dc_commit_updates_for_stream(dm->dc,
9751                                                      dummy_updates,
9752                                                      status->plane_count,
9753                                                      dm_new_crtc_state->stream,
9754                                                      &stream_update,
9755                                                      dc_state);
9756                 mutex_unlock(&dm->dc_lock);
9757         }
9758
9759         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9760         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9761                                       new_crtc_state, i) {
9762                 if (old_crtc_state->active && !new_crtc_state->active)
9763                         crtc_disable_count++;
9764
9765                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9766                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9767
9768                 /* For freesync config update on crtc state and params for irq */
9769                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9770
9771                 /* Handle vrr on->off / off->on transitions */
9772                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9773                                                 dm_new_crtc_state);
9774         }
9775
9776         /*
9777          * Enable interrupts for CRTCs that are newly enabled or went through
9778          * a modeset. This was intentionally deferred until after the front end
9779          * state was modified, to wait until the OTG was on so that the IRQ
9780          * handlers don't access stale or invalid state.
9781          */
9782         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9783                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9784 #ifdef CONFIG_DEBUG_FS
9785                 bool configure_crc = false;
9786                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9787 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9788                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9789 #endif
9790                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9791                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9792                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9793 #endif
9794                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9795
9796                 if (new_crtc_state->active &&
9797                     (!old_crtc_state->active ||
9798                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9799                         dc_stream_retain(dm_new_crtc_state->stream);
9800                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9801                         manage_dm_interrupts(adev, acrtc, true);
9802
9803 #ifdef CONFIG_DEBUG_FS
9804                         /*
9805                          * Frontend may have changed, so reapply the CRC capture
9806                          * settings for the stream.
9807                          */
9808                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9809
9810                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9811                                 configure_crc = true;
9812 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9813                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9814                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9815                                         acrtc->dm_irq_params.crc_window.update_win = true;
9816                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9817                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9818                                         crc_rd_wrk->crtc = crtc;
9819                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9820                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9821                                 }
9822 #endif
9823                         }
9824
9825                         if (configure_crc)
9826                                 if (amdgpu_dm_crtc_configure_crc_source(
9827                                         crtc, dm_new_crtc_state, cur_crc_src))
9828                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9829 #endif
9830                 }
9831         }
9832
9833         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9834                 if (new_crtc_state->async_flip)
9835                         wait_for_vblank = false;
9836
9837         /* update planes when needed per crtc*/
9838         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9839                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9840
9841                 if (dm_new_crtc_state->stream)
9842                         amdgpu_dm_commit_planes(state, dc_state, dev,
9843                                                 dm, crtc, wait_for_vblank);
9844         }
9845
9846         /* Update audio instances for each connector. */
9847         amdgpu_dm_commit_audio(dev, state);
9848
9849 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9850         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9851         /* restore the backlight level */
9852         for (i = 0; i < dm->num_of_edps; i++) {
9853                 if (dm->backlight_dev[i] &&
9854                     (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9855                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9856         }
9857 #endif
9858         /*
9859          * Send vblank events for all events not handled in flip, and
9860          * mark the consumed events for drm_atomic_helper_commit_hw_done.
9861          */
9862         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9863         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9864
9865                 if (new_crtc_state->event)
9866                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9867
9868                 new_crtc_state->event = NULL;
9869         }
9870         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9871
9872         /* Signal HW programming completion */
9873         drm_atomic_helper_commit_hw_done(state);
9874
9875         if (wait_for_vblank)
9876                 drm_atomic_helper_wait_for_flip_done(dev, state);
9877
9878         drm_atomic_helper_cleanup_planes(dev, state);
9879
9880         /* Return the stolen VGA memory to VRAM */
9881         if (!adev->mman.keep_stolen_vga_memory)
9882                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9883         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9884
9885         /*
9886          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9887          * so we can put the GPU into runtime suspend if we're not driving any
9888          * displays anymore
9889          */
9890         for (i = 0; i < crtc_disable_count; i++)
9891                 pm_runtime_put_autosuspend(dev->dev);
9892         pm_runtime_mark_last_busy(dev->dev);
9893
9894         if (dc_state_temp)
9895                 dc_release_state(dc_state_temp);
9896 }
9897
9898
9899 static int dm_force_atomic_commit(struct drm_connector *connector)
9900 {
9901         int ret = 0;
9902         struct drm_device *ddev = connector->dev;
9903         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9904         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9905         struct drm_plane *plane = disconnected_acrtc->base.primary;
9906         struct drm_connector_state *conn_state;
9907         struct drm_crtc_state *crtc_state;
9908         struct drm_plane_state *plane_state;
9909
9910         if (!state)
9911                 return -ENOMEM;
9912
9913         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9914
9915         /* Construct an atomic state to restore previous display setting */
9916
9917         /*
9918          * Attach connectors to drm_atomic_state
9919          */
9920         conn_state = drm_atomic_get_connector_state(state, connector);
9921
9922         ret = PTR_ERR_OR_ZERO(conn_state);
9923         if (ret)
9924                 goto out;
9925
9926         /* Attach crtc to drm_atomic_state*/
9927         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9928
9929         ret = PTR_ERR_OR_ZERO(crtc_state);
9930         if (ret)
9931                 goto out;
9932
9933         /* force a restore */
9934         crtc_state->mode_changed = true;
9935
9936         /* Attach plane to drm_atomic_state */
9937         plane_state = drm_atomic_get_plane_state(state, plane);
9938
9939         ret = PTR_ERR_OR_ZERO(plane_state);
9940         if (ret)
9941                 goto out;
9942
9943         /* Call commit internally with the state we just constructed */
9944         ret = drm_atomic_commit(state);
9945
9946 out:
9947         drm_atomic_state_put(state);
9948         if (ret)
9949                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9950
9951         return ret;
9952 }
9953
9954 /*
9955  * This function handles all cases when a set mode does not come upon hotplug.
9956  * This includes when a display is unplugged and then plugged back into the
9957  * same port, and when running without usermode desktop manager support.
9958  */
9959 void dm_restore_drm_connector_state(struct drm_device *dev,
9960                                     struct drm_connector *connector)
9961 {
9962         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9963         struct amdgpu_crtc *disconnected_acrtc;
9964         struct dm_crtc_state *acrtc_state;
9965
9966         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9967                 return;
9968
9969         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9970         if (!disconnected_acrtc)
9971                 return;
9972
9973         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9974         if (!acrtc_state->stream)
9975                 return;
9976
9977         /*
9978          * If the previous sink is not released and is different from the current
9979          * one, we deduce we are in a state where we cannot rely on a usermode
9980          * call to turn on the display, so we do it here.
9981          */
9982         if (acrtc_state->stream->sink != aconnector->dc_sink)
9983                 dm_force_atomic_commit(&aconnector->base);
9984 }
9985
9986 /*
9987  * Grabs all modesetting locks to serialize against any blocking commits, and
9988  * waits for completion of all non-blocking commits.
9989  */
9990 static int do_aquire_global_lock(struct drm_device *dev,
9991                                  struct drm_atomic_state *state)
9992 {
9993         struct drm_crtc *crtc;
9994         struct drm_crtc_commit *commit;
9995         long ret;
9996
9997         /*
9998          * Adding all modeset locks to acquire_ctx will
9999          * ensure that when the framework releases it, the
10000          * extra locks we are taking here will get released too.
10001          */
10002         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10003         if (ret)
10004                 return ret;
10005
10006         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10007                 spin_lock(&crtc->commit_lock);
10008                 commit = list_first_entry_or_null(&crtc->commit_list,
10009                                 struct drm_crtc_commit, commit_entry);
10010                 if (commit)
10011                         drm_crtc_commit_get(commit);
10012                 spin_unlock(&crtc->commit_lock);
10013
10014                 if (!commit)
10015                         continue;
10016
10017                 /*
10018                  * Make sure all pending HW programming has completed and
10019                  * all page flips are done.
10020                  */
10021                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10022
10023                 if (ret > 0)
10024                         ret = wait_for_completion_interruptible_timeout(
10025                                         &commit->flip_done, 10*HZ);
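                /*
                 * wait_for_completion_interruptible_timeout() returns > 0 on
                 * completion, 0 on timeout and < 0 when interrupted by a
                 * signal, hence the timeout check below and the final
                 * "ret < 0 ? ret : 0" on return.
                 */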
10026
10027                 if (ret == 0)
10028                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10029                                   crtc->base.id, crtc->name);
10030
10031                 drm_crtc_commit_put(commit);
10032         }
10033
10034         return ret < 0 ? ret : 0;
10035 }
10036
10037 static void get_freesync_config_for_crtc(
10038         struct dm_crtc_state *new_crtc_state,
10039         struct dm_connector_state *new_con_state)
10040 {
10041         struct mod_freesync_config config = {0};
10042         struct amdgpu_dm_connector *aconnector =
10043                         to_amdgpu_dm_connector(new_con_state->base.connector);
10044         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10045         int vrefresh = drm_mode_vrefresh(mode);
10046         bool fs_vid_mode = false;
10047
10048         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10049                                         vrefresh >= aconnector->min_vfreq &&
10050                                         vrefresh <= aconnector->max_vfreq;
10051
10052         if (new_crtc_state->vrr_supported) {
10053                 new_crtc_state->stream->ignore_msa_timing_param = true;
10054                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10055
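                /*
                 * mod_freesync expects the refresh bounds in micro-Hz: e.g. a
                 * 48-144 Hz VRR range becomes 48,000,000-144,000,000 uHz here.
                 */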
10056                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10057                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10058                 config.vsif_supported = true;
10059                 config.btr = true;
10060
10061                 if (fs_vid_mode) {
10062                         config.state = VRR_STATE_ACTIVE_FIXED;
10063                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10064                         goto out;
10065                 } else if (new_crtc_state->base.vrr_enabled) {
10066                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10067                 } else {
10068                         config.state = VRR_STATE_INACTIVE;
10069                 }
10070         }
10071 out:
10072         new_crtc_state->freesync_config = config;
10073 }
10074
10075 static void reset_freesync_config_for_crtc(
10076         struct dm_crtc_state *new_crtc_state)
10077 {
10078         new_crtc_state->vrr_supported = false;
10079
10080         memset(&new_crtc_state->vrr_infopacket, 0,
10081                sizeof(new_crtc_state->vrr_infopacket));
10082 }
10083
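/*
 * Returns true when only the vertical timing (vtotal and the vsync start/end
 * positions) differs between the two modes while the vsync pulse width and
 * all horizontal timing stay identical, i.e. when adjusting the vertical
 * front porch alone can turn one mode into the other, which is what the
 * freesync video path relies on.
 */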
10084 static bool
10085 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10086                                  struct drm_crtc_state *new_crtc_state)
10087 {
10088         struct drm_display_mode old_mode, new_mode;
10089
10090         if (!old_crtc_state || !new_crtc_state)
10091                 return false;
10092
10093         old_mode = old_crtc_state->mode;
10094         new_mode = new_crtc_state->mode;
10095
10096         if (old_mode.clock       == new_mode.clock &&
10097             old_mode.hdisplay    == new_mode.hdisplay &&
10098             old_mode.vdisplay    == new_mode.vdisplay &&
10099             old_mode.htotal      == new_mode.htotal &&
10100             old_mode.vtotal      != new_mode.vtotal &&
10101             old_mode.hsync_start == new_mode.hsync_start &&
10102             old_mode.vsync_start != new_mode.vsync_start &&
10103             old_mode.hsync_end   == new_mode.hsync_end &&
10104             old_mode.vsync_end   != new_mode.vsync_end &&
10105             old_mode.hskew       == new_mode.hskew &&
10106             old_mode.vscan       == new_mode.vscan &&
10107             (old_mode.vsync_end - old_mode.vsync_start) ==
10108             (new_mode.vsync_end - new_mode.vsync_start))
10109                 return true;
10110
10111         return false;
10112 }
10113
10114 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10115         uint64_t num, den, res;
10116         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10117
10118         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10119
10120         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10121         den = (unsigned long long)new_crtc_state->mode.htotal *
10122               (unsigned long long)new_crtc_state->mode.vtotal;
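        /*
         * mode.clock is in kHz, so num is the pixel rate scaled to uHz and
         * num / den is the refresh rate in uHz. E.g. a 1080p mode with a
         * 148500 kHz clock and 2200x1125 total timing gives
         * 148500000000000 / 2475000 = 60000000 uHz, i.e. 60 Hz.
         */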
10123
10124         res = div_u64(num, den);
10125         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10126 }
10127
10128 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10129                                 struct drm_atomic_state *state,
10130                                 struct drm_crtc *crtc,
10131                                 struct drm_crtc_state *old_crtc_state,
10132                                 struct drm_crtc_state *new_crtc_state,
10133                                 bool enable,
10134                                 bool *lock_and_validation_needed)
10135 {
10136         struct dm_atomic_state *dm_state = NULL;
10137         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10138         struct dc_stream_state *new_stream;
10139         int ret = 0;
10140
10141         /*
10142          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
10143          * dc_validation_set, and update changed items there.
10144          */
10145         struct amdgpu_crtc *acrtc = NULL;
10146         struct amdgpu_dm_connector *aconnector = NULL;
10147         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10148         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10149
10150         new_stream = NULL;
10151
10152         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10153         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10154         acrtc = to_amdgpu_crtc(crtc);
10155         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10156
10157         /* TODO This hack should go away */
10158         if (aconnector && enable) {
10159                 /* Make sure fake sink is created in plug-in scenario */
10160                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10161                                                             &aconnector->base);
10162                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10163                                                             &aconnector->base);
10164
10165                 if (IS_ERR(drm_new_conn_state)) {
10166                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10167                         goto fail;
10168                 }
10169
10170                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10171                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10172
10173                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10174                         goto skip_modeset;
10175
10176                 new_stream = create_validate_stream_for_sink(aconnector,
10177                                                              &new_crtc_state->mode,
10178                                                              dm_new_conn_state,
10179                                                              dm_old_crtc_state->stream);
10180
10181                 /*
10182                  * We can end up with no stream on ACTION_SET if a display
10183                  * was disconnected during S3. In that case it is not an
10184                  * error: the OS will be updated after detection and will
10185                  * do the right thing on the next atomic commit.
10186                  */
10187
10188                 if (!new_stream) {
10189                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10190                                         __func__, acrtc->base.base.id);
10191                         ret = -ENOMEM;
10192                         goto fail;
10193                 }
10194
10195                 /*
10196                  * TODO: Check VSDB bits to decide whether this should
10197                  * be enabled or not.
10198                  */
10199                 new_stream->triggered_crtc_reset.enabled =
10200                         dm->force_timing_sync;
10201
10202                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10203
10204                 ret = fill_hdr_info_packet(drm_new_conn_state,
10205                                            &new_stream->hdr_static_metadata);
10206                 if (ret)
10207                         goto fail;
10208
10209                 /*
10210                  * If we already removed the old stream from the context
10211                  * (and set the new stream to NULL) then we can't reuse
10212                  * the old stream even if the stream and scaling are unchanged.
10213                  * We'll hit the BUG_ON and black screen.
10214                  *
10215                  * TODO: Refactor this function to allow this check to work
10216                  * in all conditions.
10217                  */
10218                 if (amdgpu_freesync_vid_mode &&
10219                     dm_new_crtc_state->stream &&
10220                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10221                         goto skip_modeset;
10222
10223                 if (dm_new_crtc_state->stream &&
10224                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10225                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10226                         new_crtc_state->mode_changed = false;
10227                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10228                                          new_crtc_state->mode_changed);
10229                 }
10230         }
10231
10232         /* mode_changed flag may get updated above, need to check again */
10233         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10234                 goto skip_modeset;
10235
10236         DRM_DEBUG_ATOMIC(
10237                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10238                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10239                 "connectors_changed:%d\n",
10240                 acrtc->crtc_id,
10241                 new_crtc_state->enable,
10242                 new_crtc_state->active,
10243                 new_crtc_state->planes_changed,
10244                 new_crtc_state->mode_changed,
10245                 new_crtc_state->active_changed,
10246                 new_crtc_state->connectors_changed);
10247
10248         /* Remove stream for any changed/disabled CRTC */
10249         if (!enable) {
10250
10251                 if (!dm_old_crtc_state->stream)
10252                         goto skip_modeset;
10253
10254                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10255                     is_timing_unchanged_for_freesync(new_crtc_state,
10256                                                      old_crtc_state)) {
10257                         new_crtc_state->mode_changed = false;
10258                         DRM_DEBUG_DRIVER(
10259                                 "Mode change not required for front porch change, "
10260                                 "setting mode_changed to %d",
10261                                 new_crtc_state->mode_changed);
10262
10263                         set_freesync_fixed_config(dm_new_crtc_state);
10264
10265                         goto skip_modeset;
10266                 } else if (amdgpu_freesync_vid_mode && aconnector &&
10267                            is_freesync_video_mode(&new_crtc_state->mode,
10268                                                   aconnector)) {
10269                         struct drm_display_mode *high_mode;
10270
10271                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10272                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10273                                 set_freesync_fixed_config(dm_new_crtc_state);
10274                         }
10275                 }
10276
10277                 ret = dm_atomic_get_state(state, &dm_state);
10278                 if (ret)
10279                         goto fail;
10280
10281                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10282                                 crtc->base.id);
10283
10284                 /* i.e. reset mode */
10285                 if (dc_remove_stream_from_ctx(
10286                                 dm->dc,
10287                                 dm_state->context,
10288                                 dm_old_crtc_state->stream) != DC_OK) {
10289                         ret = -EINVAL;
10290                         goto fail;
10291                 }
10292
10293                 dc_stream_release(dm_old_crtc_state->stream);
10294                 dm_new_crtc_state->stream = NULL;
10295
10296                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10297
10298                 *lock_and_validation_needed = true;
10299
10300         } else {/* Add stream for any updated/enabled CRTC */
10301                 /*
10302                  * Quick fix to prevent a NULL pointer dereference on new_stream
10303                  * when newly added MST connectors are not found in the existing
10304                  * crtc_state in chained mode. TODO: dig out the root cause of this.
10305                  */
10306                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10307                         goto skip_modeset;
10308
10309                 if (modereset_required(new_crtc_state))
10310                         goto skip_modeset;
10311
10312                 if (modeset_required(new_crtc_state, new_stream,
10313                                      dm_old_crtc_state->stream)) {
10314
10315                         WARN_ON(dm_new_crtc_state->stream);
10316
10317                         ret = dm_atomic_get_state(state, &dm_state);
10318                         if (ret)
10319                                 goto fail;
10320
10321                         dm_new_crtc_state->stream = new_stream;
10322
10323                         dc_stream_retain(new_stream);
10324
10325                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10326                                          crtc->base.id);
10327
10328                         if (dc_add_stream_to_ctx(
10329                                         dm->dc,
10330                                         dm_state->context,
10331                                         dm_new_crtc_state->stream) != DC_OK) {
10332                                 ret = -EINVAL;
10333                                 goto fail;
10334                         }
10335
10336                         *lock_and_validation_needed = true;
10337                 }
10338         }
10339
10340 skip_modeset:
10341         /* Release extra reference */
10342         if (new_stream)
10343                 dc_stream_release(new_stream);
10344
10345         /*
10346          * We want to do dc stream updates that do not require a
10347          * full modeset below.
10348          */
10349         if (!(enable && aconnector && new_crtc_state->active))
10350                 return 0;
10351         /*
10352          * Given the above conditions, the dc state cannot be NULL because:
10353          * 1. we're in the process of enabling CRTCs (the stream has just been
10354          *    added to the dc context, or is already in the context),
10355          * 2. the CRTC has a valid connector attached, and
10356          * 3. the CRTC is currently active and enabled.
10357          * => The dc stream state currently exists.
10358          */
10359         BUG_ON(dm_new_crtc_state->stream == NULL);
10360
10361         /* Scaling or underscan settings */
10362         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10363                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10364                 update_stream_scaling_settings(
10365                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10366
10367         /* ABM settings */
10368         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10369
10370         /*
10371          * Color management settings. We also update color properties
10372          * when a modeset is needed, to ensure it gets reprogrammed.
10373          */
10374         if (dm_new_crtc_state->base.color_mgmt_changed ||
10375             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10376                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10377                 if (ret)
10378                         goto fail;
10379         }
10380
10381         /* Update Freesync settings. */
10382         get_freesync_config_for_crtc(dm_new_crtc_state,
10383                                      dm_new_conn_state);
10384
10385         return ret;
10386
10387 fail:
10388         if (new_stream)
10389                 dc_stream_release(new_stream);
10390         return ret;
10391 }
10392
10393 static bool should_reset_plane(struct drm_atomic_state *state,
10394                                struct drm_plane *plane,
10395                                struct drm_plane_state *old_plane_state,
10396                                struct drm_plane_state *new_plane_state)
10397 {
10398         struct drm_plane *other;
10399         struct drm_plane_state *old_other_state, *new_other_state;
10400         struct drm_crtc_state *new_crtc_state;
10401         int i;
10402
10403         /*
10404          * TODO: Remove this hack once the checks below are sufficient
10405          * to determine when we need to reset all the planes on
10406          * the stream.
10407          */
10408         if (state->allow_modeset)
10409                 return true;
10410
10411         /* Exit early if we know that we're adding or removing the plane. */
10412         if (old_plane_state->crtc != new_plane_state->crtc)
10413                 return true;
10414
10415         /* old crtc == new_crtc == NULL, plane not in context. */
10416         if (!new_plane_state->crtc)
10417                 return false;
10418
10419         new_crtc_state =
10420                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10421
10422         if (!new_crtc_state)
10423                 return true;
10424
10425         /* CRTC Degamma changes currently require us to recreate planes. */
10426         if (new_crtc_state->color_mgmt_changed)
10427                 return true;
10428
10429         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10430                 return true;
10431
10432         /*
10433          * If there are any new primary or overlay planes being added or
10434          * removed then the z-order can potentially change. To ensure
10435          * correct z-order and pipe acquisition the current DC architecture
10436          * requires us to remove and recreate all existing planes.
10437          *
10438          * TODO: Come up with a more elegant solution for this.
10439          */
10440         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10441                 struct amdgpu_framebuffer *old_afb, *new_afb;

10442                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10443                         continue;
10444
10445                 if (old_other_state->crtc != new_plane_state->crtc &&
10446                     new_other_state->crtc != new_plane_state->crtc)
10447                         continue;
10448
10449                 if (old_other_state->crtc != new_other_state->crtc)
10450                         return true;
10451
10452                 /* Src/dst size and scaling updates. */
10453                 if (old_other_state->src_w != new_other_state->src_w ||
10454                     old_other_state->src_h != new_other_state->src_h ||
10455                     old_other_state->crtc_w != new_other_state->crtc_w ||
10456                     old_other_state->crtc_h != new_other_state->crtc_h)
10457                         return true;
10458
10459                 /* Rotation / mirroring updates. */
10460                 if (old_other_state->rotation != new_other_state->rotation)
10461                         return true;
10462
10463                 /* Blending updates. */
10464                 if (old_other_state->pixel_blend_mode !=
10465                     new_other_state->pixel_blend_mode)
10466                         return true;
10467
10468                 /* Alpha updates. */
10469                 if (old_other_state->alpha != new_other_state->alpha)
10470                         return true;
10471
10472                 /* Colorspace changes. */
10473                 if (old_other_state->color_range != new_other_state->color_range ||
10474                     old_other_state->color_encoding != new_other_state->color_encoding)
10475                         return true;
10476
10477                 /* Framebuffer checks fall at the end. */
10478                 if (!old_other_state->fb || !new_other_state->fb)
10479                         continue;
10480
10481                 /* Pixel format changes can require bandwidth updates. */
10482                 if (old_other_state->fb->format != new_other_state->fb->format)
10483                         return true;
10484
10485                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10486                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10487
10488                 /* Tiling and DCC changes also require bandwidth updates. */
10489                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10490                     old_afb->base.modifier != new_afb->base.modifier)
10491                         return true;
10492         }
10493
10494         return false;
10495 }
10496
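/**
 * dm_check_cursor_fb() - Validate a framebuffer for use on the cursor plane
 * @new_acrtc: The CRTC whose cursor limits apply
 * @new_plane_state: New state of the cursor plane
 * @fb: The framebuffer to validate
 *
 * The hardware cursor only supports unscaled, linear surfaces whose pitch is
 * exactly 64, 128 or 256 pixels and matches the FB width. For example, a
 * tightly packed 64x64 ARGB8888 cursor has pitches[0] == 256 bytes and
 * cpp == 4, giving a pitch of 64 pixels, which is accepted.
 *
 * Return: 0 if the framebuffer is usable as a cursor, -EINVAL otherwise.
 */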
10497 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10498                               struct drm_plane_state *new_plane_state,
10499                               struct drm_framebuffer *fb)
10500 {
10501         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10502         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10503         unsigned int pitch;
10504         bool linear;
10505
10506         if (fb->width > new_acrtc->max_cursor_width ||
10507             fb->height > new_acrtc->max_cursor_height) {
10508                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10509                                  fb->width,
10510                                  fb->height);
10511                 return -EINVAL;
10512         }
10513         if (new_plane_state->src_w != fb->width << 16 ||
10514             new_plane_state->src_h != fb->height << 16) {
10515                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10516                 return -EINVAL;
10517         }
10518
10519         /* Pitch in pixels */
10520         pitch = fb->pitches[0] / fb->format->cpp[0];
10521
10522         if (fb->width != pitch) {
10523                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10524                                  fb->width, pitch);
10525                 return -EINVAL;
10526         }
10527
10528         switch (pitch) {
10529         case 64:
10530         case 128:
10531         case 256:
10532                 /* FB pitch is supported by cursor plane */
10533                 break;
10534         default:
10535                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10536                 return -EINVAL;
10537         }
10538
10539         /* Core DRM takes care of checking FB modifiers, so we only need to
10540          * check tiling flags when the FB doesn't have a modifier. */
10541         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10542                 if (adev->family < AMDGPU_FAMILY_AI) {
10543                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10544                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10545                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10546                 } else {
10547                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10548                 }
10549                 if (!linear) {
10550                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10551                         return -EINVAL;
10552                 }
10553         }
10554
10555         return 0;
10556 }
10557
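/**
 * dm_update_plane_state() - Update the DC plane state for an atomic update
 * @dc: The DC core
 * @state: The atomic state being checked
 * @plane: The plane to update
 * @old_plane_state: Old DRM state of the plane
 * @new_plane_state: New DRM state of the plane
 * @enable: True on the enable pass (planes being added or modified), false
 *          on the disable pass (planes being removed)
 * @lock_and_validation_needed: Set to true when the change requires DC
 *                              global validation
 *
 * Called twice per plane from atomic check: once to remove changed or
 * removed planes from the DC context, and once to add new or modified ones.
 * Cursor planes are only sanity checked here since they don't get a
 * dedicated DC plane state.
 *
 * Return: 0 on success, negative errno on failure.
 */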
10558 static int dm_update_plane_state(struct dc *dc,
10559                                  struct drm_atomic_state *state,
10560                                  struct drm_plane *plane,
10561                                  struct drm_plane_state *old_plane_state,
10562                                  struct drm_plane_state *new_plane_state,
10563                                  bool enable,
10564                                  bool *lock_and_validation_needed)
10565 {
10566
10567         struct dm_atomic_state *dm_state = NULL;
10568         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10569         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10570         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10571         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10572         struct amdgpu_crtc *new_acrtc;
10573         bool needs_reset;
10574         int ret = 0;
10575
10576
10577         new_plane_crtc = new_plane_state->crtc;
10578         old_plane_crtc = old_plane_state->crtc;
10579         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10580         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10581
10582         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10583                 if (!enable || !new_plane_crtc ||
10584                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10585                         return 0;
10586
10587                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10588
10589                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10590                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10591                         return -EINVAL;
10592                 }
10593
10594                 if (new_plane_state->fb) {
10595                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10596                                                  new_plane_state->fb);
10597                         if (ret)
10598                                 return ret;
10599                 }
10600
10601                 return 0;
10602         }
10603
10604         needs_reset = should_reset_plane(state, plane, old_plane_state,
10605                                          new_plane_state);
10606
10607         /* Remove any changed/removed planes */
10608         if (!enable) {
10609                 if (!needs_reset)
10610                         return 0;
10611
10612                 if (!old_plane_crtc)
10613                         return 0;
10614
10615                 old_crtc_state = drm_atomic_get_old_crtc_state(
10616                                 state, old_plane_crtc);
10617                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10618
10619                 if (!dm_old_crtc_state->stream)
10620                         return 0;
10621
10622                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10623                                 plane->base.id, old_plane_crtc->base.id);
10624
10625                 ret = dm_atomic_get_state(state, &dm_state);
10626                 if (ret)
10627                         return ret;
10628
10629                 if (!dc_remove_plane_from_context(
10630                                 dc,
10631                                 dm_old_crtc_state->stream,
10632                                 dm_old_plane_state->dc_state,
10633                                 dm_state->context)) {
10634
10635                         return -EINVAL;
10636                 }
10637
10638
10639                 dc_plane_state_release(dm_old_plane_state->dc_state);
10640                 dm_new_plane_state->dc_state = NULL;
10641
10642                 *lock_and_validation_needed = true;
10643
10644         } else { /* Add new planes */
10645                 struct dc_plane_state *dc_new_plane_state;
10646
10647                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10648                         return 0;
10649
10650                 if (!new_plane_crtc)
10651                         return 0;
10652
10653                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10654                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10655
10656                 if (!dm_new_crtc_state->stream)
10657                         return 0;
10658
10659                 if (!needs_reset)
10660                         return 0;
10661
10662                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10663                 if (ret)
10664                         return ret;
10665
10666                 WARN_ON(dm_new_plane_state->dc_state);
10667
10668                 dc_new_plane_state = dc_create_plane_state(dc);
10669                 if (!dc_new_plane_state)
10670                         return -ENOMEM;
10671
10672                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10673                                  plane->base.id, new_plane_crtc->base.id);
10674
10675                 ret = fill_dc_plane_attributes(
10676                         drm_to_adev(new_plane_crtc->dev),
10677                         dc_new_plane_state,
10678                         new_plane_state,
10679                         new_crtc_state);
10680                 if (ret) {
10681                         dc_plane_state_release(dc_new_plane_state);
10682                         return ret;
10683                 }
10684
10685                 ret = dm_atomic_get_state(state, &dm_state);
10686                 if (ret) {
10687                         dc_plane_state_release(dc_new_plane_state);
10688                         return ret;
10689                 }
10690
10691                 /*
10692                  * Any atomic check errors that occur after this will
10693                  * not need a release. The plane state will be attached
10694                  * to the stream, and therefore part of the atomic
10695                  * state. It'll be released when the atomic state is
10696                  * cleaned.
10697                  */
10698                 if (!dc_add_plane_to_context(
10699                                 dc,
10700                                 dm_new_crtc_state->stream,
10701                                 dc_new_plane_state,
10702                                 dm_state->context)) {
10703
10704                         dc_plane_state_release(dc_new_plane_state);
10705                         return -EINVAL;
10706                 }
10707
10708                 dm_new_plane_state->dc_state = dc_new_plane_state;
10709
10710                 /* Tell DC to do a full surface update every time there
10711                  * is a plane change. Inefficient, but works for now.
10712                  */
10713                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10714
10715                 *lock_and_validation_needed = true;
10716         }
10717
10718
10719         return ret;
10720 }
10721
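/**
 * dm_check_crtc_cursor() - Check cursor scaling against underlying planes
 * @state: The atomic state containing the update
 * @crtc: The CRTC whose cursor is checked
 * @new_crtc_state: New state of that CRTC
 *
 * Scale factors are compared in thousandths: a plane with a src_w of
 * 1920 << 16 displayed at a crtc_w of 960 has a horizontal scale of
 * 960 * 1000 / 1920 = 500, so an unscaled cursor (scale 1000) on top of it
 * is rejected.
 *
 * Return: 0 when the scaling matches (or there is no cursor FB), -EINVAL
 * otherwise.
 */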
10722 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10723                                 struct drm_crtc *crtc,
10724                                 struct drm_crtc_state *new_crtc_state)
10725 {
10726         struct drm_plane *cursor = crtc->cursor, *underlying;
10727         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10728         int i;
10729         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10730
10731         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10732          * cursor per pipe, but it's going to inherit the scaling and
10733          * positioning from the underlying pipe. Check that the cursor plane's
10734          * scaling matches the underlying planes'. */
10735
10736         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10737         if (!new_cursor_state || !new_cursor_state->fb) {
10738                 return 0;
10739         }
10740
10741         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10742                          (new_cursor_state->src_w >> 16);
10743         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10744                          (new_cursor_state->src_h >> 16);
10745
10746         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10747                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10748                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10749                         continue;
10750
10751                 /* Ignore disabled planes */
10752                 if (!new_underlying_state->fb)
10753                         continue;
10754
10755                 underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10756                                      (new_underlying_state->src_w >> 16);
10757                 underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10758                                      (new_underlying_state->src_h >> 16);
10759
10760                 if (cursor_scale_w != underlying_scale_w ||
10761                     cursor_scale_h != underlying_scale_h) {
10762                         drm_dbg_atomic(crtc->dev,
10763                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10764                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10765                         return -EINVAL;
10766                 }
10767
10768                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10769                 if (new_underlying_state->crtc_x <= 0 &&
10770                     new_underlying_state->crtc_y <= 0 &&
10771                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10772                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10773                         break;
10774         }
10775
10776         return 0;
10777 }
10778
10779 #if defined(CONFIG_DRM_AMD_DC_DCN)
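/*
 * A modeset on one CRTC can change the DSC bandwidth available to other
 * streams sharing the same MST link. Find the MST connector feeding @crtc
 * (if any) and pull every CRTC affected by DSC on that topology into the
 * atomic state.
 */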
10780 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10781 {
10782         struct drm_connector *connector;
10783         struct drm_connector_state *conn_state;
10784         struct amdgpu_dm_connector *aconnector = NULL;
10785         int i;
10786         for_each_new_connector_in_state(state, connector, conn_state, i) {
10787                 if (conn_state->crtc != crtc)
10788                         continue;
10789
10790                 aconnector = to_amdgpu_dm_connector(connector);
10791                 if (!aconnector->port || !aconnector->mst_port)
10792                         aconnector = NULL;
10793                 else
10794                         break;
10795         }
10796
10797         if (!aconnector)
10798                 return 0;
10799
10800         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10801 }
10802 #endif
10803
10804 /**
10805  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10806  * @dev: The DRM device
10807  * @state: The atomic state to commit
10808  *
10809  * Validate that the given atomic state is programmable by DC into hardware.
10810  * This involves constructing a &struct dc_state reflecting the new hardware
10811  * state we wish to commit, then querying DC to see if it is programmable. It's
10812  * important not to modify the existing DC state. Otherwise, atomic_check
10813  * may unexpectedly commit hardware changes.
10814  *
10815  * When validating the DC state, it's important that the right locks are
10816  * acquired. For full updates case which removes/adds/updates streams on one
10817  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10818  * that any such full update commit will wait for completion of any outstanding
10819  * flip using DRMs synchronization events.
10820  *
10821  * Note that DM adds the affected connectors for all CRTCs in state, when that
10822  * might not seem necessary. This is because DC stream creation requires the
10823  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10824  * be possible but non-trivial - a possible TODO item.
10825  *
10826  * Return: -Error code if validation failed.
10827  */
10828 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10829                                   struct drm_atomic_state *state)
10830 {
10831         struct amdgpu_device *adev = drm_to_adev(dev);
10832         struct dm_atomic_state *dm_state = NULL;
10833         struct dc *dc = adev->dm.dc;
10834         struct drm_connector *connector;
10835         struct drm_connector_state *old_con_state, *new_con_state;
10836         struct drm_crtc *crtc;
10837         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10838         struct drm_plane *plane;
10839         struct drm_plane_state *old_plane_state, *new_plane_state;
10840         enum dc_status status;
10841         int ret, i;
10842         bool lock_and_validation_needed = false;
10843         struct dm_crtc_state *dm_old_crtc_state;
10844 #if defined(CONFIG_DRM_AMD_DC_DCN)
10845         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10846         struct drm_dp_mst_topology_state *mst_state;
10847         struct drm_dp_mst_topology_mgr *mgr;
10848 #endif
10849
10850         trace_amdgpu_dm_atomic_check_begin(state);
10851
10852         ret = drm_atomic_helper_check_modeset(dev, state);
10853         if (ret) {
10854                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10855                 goto fail;
10856         }
10857
10858         /* Check connector changes */
10859         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10860                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10861                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10862
10863                 /* Skip connectors that are disabled or part of modeset already. */
10864                 if (!old_con_state->crtc && !new_con_state->crtc)
10865                         continue;
10866
10867                 if (!new_con_state->crtc)
10868                         continue;
10869
10870                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10871                 if (IS_ERR(new_crtc_state)) {
10872                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10873                         ret = PTR_ERR(new_crtc_state);
10874                         goto fail;
10875                 }
10876
10877                 if (dm_old_con_state->abm_level !=
10878                     dm_new_con_state->abm_level)
10879                         new_crtc_state->connectors_changed = true;
10880         }
10881
10882 #if defined(CONFIG_DRM_AMD_DC_DCN)
10883         if (dc_resource_is_dsc_encoding_supported(dc)) {
10884                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10885                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10886                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10887                                 if (ret) {
10888                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10889                                         goto fail;
10890                                 }
10891                         }
10892                 }
10893         }
10894 #endif
10895         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10896                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10897
10898                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10899                     !new_crtc_state->color_mgmt_changed &&
10900                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10901                     !dm_old_crtc_state->dsc_force_changed)
10902                         continue;
10903
10904                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10905                 if (ret) {
10906                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10907                         goto fail;
10908                 }
10909
10910                 if (!new_crtc_state->enable)
10911                         continue;
10912
10913                 ret = drm_atomic_add_affected_connectors(state, crtc);
10914                 if (ret) {
10915                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10916                         goto fail;
10917                 }
10918
10919                 ret = drm_atomic_add_affected_planes(state, crtc);
10920                 if (ret) {
10921                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10922                         goto fail;
10923                 }
10924
10925                 if (dm_old_crtc_state->dsc_force_changed)
10926                         new_crtc_state->mode_changed = true;
10927         }
10928
10929         /*
10930          * Add all primary and overlay planes on the CRTC to the state
10931          * whenever a plane is enabled to maintain correct z-ordering
10932          * and to enable fast surface updates.
10933          */
10934         drm_for_each_crtc(crtc, dev) {
10935                 bool modified = false;
10936
10937                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10938                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10939                                 continue;
10940
10941                         if (new_plane_state->crtc == crtc ||
10942                             old_plane_state->crtc == crtc) {
10943                                 modified = true;
10944                                 break;
10945                         }
10946                 }
10947
10948                 if (!modified)
10949                         continue;
10950
10951                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10952                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10953                                 continue;
10954
10955                         new_plane_state =
10956                                 drm_atomic_get_plane_state(state, plane);
10957
10958                         if (IS_ERR(new_plane_state)) {
10959                                 ret = PTR_ERR(new_plane_state);
10960                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
10961                                 goto fail;
10962                         }
10963                 }
10964         }
10965
10966         /* Remove existing planes if they are modified */
10967         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10968                 ret = dm_update_plane_state(dc, state, plane,
10969                                             old_plane_state,
10970                                             new_plane_state,
10971                                             false,
10972                                             &lock_and_validation_needed);
10973                 if (ret) {
10974                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10975                         goto fail;
10976                 }
10977         }
10978
10979         /* Disable all crtcs which require disable */
10980         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10981                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10982                                            old_crtc_state,
10983                                            new_crtc_state,
10984                                            false,
10985                                            &lock_and_validation_needed);
10986                 if (ret) {
10987                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
10988                         goto fail;
10989                 }
10990         }
10991
10992         /* Enable all crtcs which require enable */
10993         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10994                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10995                                            old_crtc_state,
10996                                            new_crtc_state,
10997                                            true,
10998                                            &lock_and_validation_needed);
10999                 if (ret) {
11000                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11001                         goto fail;
11002                 }
11003         }
11004
11005         /* Add new/modified planes */
11006         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11007                 ret = dm_update_plane_state(dc, state, plane,
11008                                             old_plane_state,
11009                                             new_plane_state,
11010                                             true,
11011                                             &lock_and_validation_needed);
11012                 if (ret) {
11013                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11014                         goto fail;
11015                 }
11016         }
11017
11018         /* Run this here since we want to validate the streams we created */
11019         ret = drm_atomic_helper_check_planes(dev, state);
11020         if (ret) {
11021                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11022                 goto fail;
11023         }
11024
11025         /* Check cursor planes scaling */
11026         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11027                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11028                 if (ret) {
11029                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11030                         goto fail;
11031                 }
11032         }
11033
11034         if (state->legacy_cursor_update) {
11035                 /*
11036                  * This is a fast cursor update coming from the plane update
11037                  * helper, check if it can be done asynchronously for better
11038                  * performance.
11039                  */
11040                 state->async_update =
11041                         !drm_atomic_helper_async_check(dev, state);
11042
11043                 /*
11044                  * Skip the remaining global validation if this is an async
11045                  * update. Cursor updates can be done without affecting
11046                  * state or bandwidth calcs and this avoids the performance
11047                  * penalty of locking the private state object and
11048                  * allocating a new dc_state.
11049                  */
11050                 if (state->async_update)
11051                         return 0;
11052         }
11053
11054         /* Check scaling and underscan changes */
11055         /* TODO: Scaling-change validation was removed due to the inability to
11056          * commit a new stream into the context w/o causing a full reset.
11057          * Need to decide how to handle this.
11058          */
11059         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11060                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11061                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11062                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11063
11064                 /* Skip any modesets/resets */
11065                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11066                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11067                         continue;
11068
11069                 /* Skip anything that is not a scaling or underscan change */
11070                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11071                         continue;
11072
11073                 lock_and_validation_needed = true;
11074         }
11075
11076 #if defined(CONFIG_DRM_AMD_DC_DCN)
11077         /* set the slot info for each mst_state based on the link encoding format */
11078         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11079                 struct amdgpu_dm_connector *aconnector;
11080                 struct drm_connector *connector;
11081                 struct drm_connector_list_iter iter;
11082                 u8 link_coding_cap;
11083
11084                 if (!mgr->mst_state)
11085                         continue;
11086
11087                 drm_connector_list_iter_begin(dev, &iter);
11088                 drm_for_each_connector_iter(connector, &iter) {
11089                         int id = connector->index;
11090
11091                         if (id == mst_state->mgr->conn_base_id) {
11092                                 aconnector = to_amdgpu_dm_connector(connector);
11093                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11094                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11095
11096                                 break;
11097                         }
11098                 }
11099                 drm_connector_list_iter_end(&iter);
11100
11101         }
11102 #endif
11103         /*
11104          * Streams and planes are reset when there are changes that affect
11105          * bandwidth. Anything that affects bandwidth needs to go through
11106          * DC global validation to ensure that the configuration can be applied
11107          * to hardware.
11108          *
11109          * We currently have to stall out here in atomic_check until any
11110          * outstanding commits finish, because our IRQ handlers reference
11111          * DRM state directly - we can end up disabling interrupts too early
11112          * if we don't.
11113          *
11114          * TODO: Remove this stall and drop DM state private objects.
11115          */
11116         if (lock_and_validation_needed) {
11117                 ret = dm_atomic_get_state(state, &dm_state);
11118                 if (ret) {
11119                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11120                         goto fail;
11121                 }
11122
11123                 ret = do_aquire_global_lock(dev, state);
11124                 if (ret) {
11125                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11126                         goto fail;
11127                 }
11128
11129 #if defined(CONFIG_DRM_AMD_DC_DCN)
11130                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11131                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                        ret = -EINVAL;
11132                         goto fail;
11133                 }
11134
11135                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11136                 if (ret) {
11137                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11138                         goto fail;
11139                 }
11140 #endif
11141
11142                 /*
11143                  * Perform validation of MST topology in the state:
11144                  * We need to perform MST atomic check before calling
11145                  * dc_validate_global_state(), or there is a chance
11146                  * to get stuck in an infinite loop and hang eventually.
11147                  */
11148                 ret = drm_dp_mst_atomic_check(state);
11149                 if (ret) {
11150                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11151                         goto fail;
11152                 }
11153                 status = dc_validate_global_state(dc, dm_state->context, true);
11154                 if (status != DC_OK) {
11155                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11156                                          dc_status_to_str(status), status);
11157                         ret = -EINVAL;
11158                         goto fail;
11159                 }
11160         } else {
11161                 /*
11162                  * The commit is a fast update. Fast updates shouldn't change
11163                  * the DC context, affect global validation, and can have their
11164                  * commit work done in parallel with other commits not touching
11165                  * the same resource. If we have a new DC context as part of
11166                  * the DM atomic state from validation we need to free it and
11167                  * retain the existing one instead.
11168                  *
11169                  * Furthermore, since the DM atomic state only contains the DC
11170                  * context and can safely be annulled, we can free the state
11171                  * and clear the associated private object now to free
11172                  * some memory and avoid a possible use-after-free later.
11173                  */
11174
11175                 for (i = 0; i < state->num_private_objs; i++) {
11176                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11177
11178                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11179                                 int j = state->num_private_objs - 1;
11180
11181                                 dm_atomic_destroy_state(obj,
11182                                                 state->private_objs[i].state);
11183
11184                                 /* If i is not at the end of the array then the
11185                                  * last element needs to be moved to where i was
11186                                  * before the array can safely be truncated.
11187                                  */
11188                                 if (i != j)
11189                                         state->private_objs[i] =
11190                                                 state->private_objs[j];
11191
11192                                 state->private_objs[j].ptr = NULL;
11193                                 state->private_objs[j].state = NULL;
11194                                 state->private_objs[j].old_state = NULL;
11195                                 state->private_objs[j].new_state = NULL;
11196
11197                                 state->num_private_objs = j;
11198                                 break;
11199                         }
11200                 }
11201         }
11202
11203         /* Store the overall update type for use later in atomic check. */
11204         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11205                 struct dm_crtc_state *dm_new_crtc_state =
11206                         to_dm_crtc_state(new_crtc_state);
11207
11208                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11209                                                          UPDATE_TYPE_FULL :
11210                                                          UPDATE_TYPE_FAST;
11211         }
11212
11213         /* Must have succeeded by this point */
11214         WARN_ON(ret);
11215
11216         trace_amdgpu_dm_atomic_check_finish(state, ret);
11217
11218         return ret;
11219
11220 fail:
11221         if (ret == -EDEADLK)
11222                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11223         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11224                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11225         else
11226                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11227
11228         trace_amdgpu_dm_atomic_check_finish(state, ret);
11229
11230         return ret;
11231 }
11232
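/*
 * Read DP_DOWN_STREAM_PORT_COUNT (DPCD 0x0007) and test the
 * DP_MSA_TIMING_PAR_IGNORED bit. A sink that can ignore the MSA timing
 * parameters can be driven with a varying vertical total, which is the
 * prerequisite for enabling FreeSync over DP.
 */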
11233 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11234                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11235 {
11236         uint8_t dpcd_data;
11237         bool capable = false;
11238
11239         if (amdgpu_dm_connector->dc_link &&
11240                 dm_helpers_dp_read_dpcd(
11241                                 NULL,
11242                                 amdgpu_dm_connector->dc_link,
11243                                 DP_DOWN_STREAM_PORT_COUNT,
11244                                 &dpcd_data,
11245                                 sizeof(dpcd_data))) {
11246                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11247         }
11248
11249         return capable;
11250 }
11251
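/*
 * Send one chunk of a CEA extension block to the DMUB firmware for parsing
 * and collect the reply: the firmware either acks the chunk or, once the
 * whole block has been received, reports whether an AMD VSDB with FreeSync
 * refresh ranges was found.
 */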
11252 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11253                 unsigned int offset,
11254                 unsigned int total_length,
11255                 uint8_t *data,
11256                 unsigned int length,
11257                 struct amdgpu_hdmi_vsdb_info *vsdb)
11258 {
11259         bool res;
11260         union dmub_rb_cmd cmd;
11261         struct dmub_cmd_send_edid_cea *input;
11262         struct dmub_cmd_edid_cea_output *output;
11263
11264         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11265                 return false;
11266
11267         memset(&cmd, 0, sizeof(cmd));
11268
11269         input = &cmd.edid_cea.data.input;
11270
11271         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11272         cmd.edid_cea.header.sub_type = 0;
11273         cmd.edid_cea.header.payload_bytes =
11274                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11275         input->offset = offset;
11276         input->length = length;
11277         input->total_length = total_length;
11278         memcpy(input->payload, data, length);
11279
11280         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11281         if (!res) {
11282                 DRM_ERROR("EDID CEA parser failed\n");
11283                 return false;
11284         }
11285
11286         output = &cmd.edid_cea.data.output;
11287
11288         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11289                 if (!output->ack.success) {
11290                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11291                                         output->ack.offset);
11292                 }
11293         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11294                 if (!output->amd_vsdb.vsdb_found)
11295                         return false;
11296
11297                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11298                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11299                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11300                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11301         } else {
11302                 DRM_WARN("Unknown EDID CEA parser results\n");
11303                 return false;
11304         }
11305
11306         return true;
11307 }
11308
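/*
 * DMCU path: stream the CEA extension block to the DMCU firmware in 8-byte
 * chunks, waiting for an ack after each chunk, and read back the AMD VSDB
 * (FreeSync) result after the final chunk.
 */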
11309 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11310                 uint8_t *edid_ext, int len,
11311                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11312 {
11313         int i;
11314
11315         /* send extension block to DMCU for parsing */
11316         for (i = 0; i < len; i += 8) {
11317                 bool res;
11318                 int offset;
11319
11320                 /* send 8 bytes at a time */
11321                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11322                         return false;
11323
11324                 if (i + 8 == len) {
11325                         /* EDID block fully sent; expect the parse result */
11326                         int version, min_rate, max_rate;
11327
11328                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11329                         if (res) {
11330                                 /* amd vsdb found */
11331                                 vsdb_info->freesync_supported = 1;
11332                                 vsdb_info->amd_vsdb_version = version;
11333                                 vsdb_info->min_refresh_rate_hz = min_rate;
11334                                 vsdb_info->max_refresh_rate_hz = max_rate;
11335                                 return true;
11336                         }
11337                         /* not amd vsdb */
11338                         return false;
11339                 }
11340
11341                 /* check for ack */
11342                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11343                 if (!res)
11344                         return false;
11345         }
11346
11347         return false;
11348 }
11349
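/*
 * DMUB path: the same 8-byte chunking as the DMCU path, but each chunk is a
 * synchronous DMUB command and the VSDB result arrives with the reply to
 * the final chunk.
 */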
11350 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11351                 uint8_t *edid_ext, int len,
11352                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11353 {
11354         int i;
11355
11356         /* send extension block to DMUB for parsing */
11357         for (i = 0; i < len; i += 8) {
11358                 /* send 8 bytes at a time */
11359                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11360                         return false;
11361         }
11362
11363         return vsdb_info->freesync_supported;
11364 }
11365
11366 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11367                 uint8_t *edid_ext, int len,
11368                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11369 {
11370         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11371
11372         if (adev->dm.dmub_srv)
11373                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11374         else
11375                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11376 }
11377
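/*
 * Locate the CEA extension in @edid (an open-coded subset of
 * drm_find_cea_extension()) and hand it to the firmware parser to look for
 * an AMD vendor-specific data block carrying HDMI FreeSync refresh ranges.
 *
 * Return: the extension index on success, -ENODEV otherwise.
 */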
11378 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11379                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11380 {
11381         uint8_t *edid_ext = NULL;
11382         int i;
11383         bool valid_vsdb_found = false;
11384
11385         /*----- drm_find_cea_extension() -----*/
11386         /* No EDID or EDID extensions */
11387         if (edid == NULL || edid->extensions == 0)
11388                 return -ENODEV;
11389
11390         /* Find CEA extension */
11391         for (i = 0; i < edid->extensions; i++) {
11392                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11393                 if (edid_ext[0] == CEA_EXT)
11394                         break;
11395         }
11396
11397         if (i == edid->extensions)
11398                 return -ENODEV;
11399
11400         /*----- cea_db_offsets() -----*/
11401         if (edid_ext[0] != CEA_EXT)
11402                 return -ENODEV;
11403
11404         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11405
11406         return valid_vsdb_found ? i : -ENODEV;
11407 }
11408
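/**
 * amdgpu_dm_update_freesync_caps() - Refresh a connector's FreeSync state
 * @connector: The DRM connector to update
 * @edid: The EDID to parse, or NULL when the display was disconnected
 *
 * For DP/eDP sinks the supported refresh range comes from the EDID monitor
 * range descriptor (when the sink can ignore MSA timing parameters); for
 * HDMI sinks it comes from the AMD VSDB in the CEA extension. A range wider
 * than 10 Hz marks the connector as FreeSync capable and updates its
 * vrr_capable property accordingly.
 */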
11409 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11410                                         struct edid *edid)
11411 {
11412         int i = 0;
11413         struct detailed_timing *timing;
11414         struct detailed_non_pixel *data;
11415         struct detailed_data_monitor_range *range;
11416         struct amdgpu_dm_connector *amdgpu_dm_connector =
11417                         to_amdgpu_dm_connector(connector);
11418         struct dm_connector_state *dm_con_state = NULL;
11419         struct dc_sink *sink;
11420
11421         struct drm_device *dev = connector->dev;
11422         struct amdgpu_device *adev = drm_to_adev(dev);
11423         bool freesync_capable = false;
11424         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11425
11426         if (!connector->state) {
11427                 DRM_ERROR("%s - Connector has no state\n", __func__);
11428                 goto update;
11429         }
11430
11431         sink = amdgpu_dm_connector->dc_sink ?
11432                 amdgpu_dm_connector->dc_sink :
11433                 amdgpu_dm_connector->dc_em_sink;
11434
11435         if (!edid || !sink) {
11436                 dm_con_state = to_dm_connector_state(connector->state);
11437
11438                 amdgpu_dm_connector->min_vfreq = 0;
11439                 amdgpu_dm_connector->max_vfreq = 0;
11440                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11441                 connector->display_info.monitor_range.min_vfreq = 0;
11442                 connector->display_info.monitor_range.max_vfreq = 0;
11443                 freesync_capable = false;
11444
11445                 goto update;
11446         }
11447
11448         dm_con_state = to_dm_connector_state(connector->state);
11449
11450         if (!adev->dm.freesync_module)
11451                 goto update;
11452
11453
11454         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11455                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11456                 bool edid_check_required = false;
11457
11458                 if (edid) {
11459                         edid_check_required = is_dp_capable_without_timing_msa(
11460                                                 adev->dm.dc,
11461                                                 amdgpu_dm_connector);
11462                 }
11463
11464                 if (edid_check_required && (edid->version > 1 ||
11465                    (edid->version == 1 && edid->revision > 1))) {
11466                         for (i = 0; i < 4; i++) {
11467
11468                                 timing  = &edid->detailed_timings[i];
11469                                 data    = &timing->data.other_data;
11470                                 range   = &data->data.range;
11471                                 /*
11472                                  * Check if monitor has continuous frequency mode
11473                                  */
11474                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11475                                         continue;
11476                                 /*
11477                                  * Check for flag range limits only. If flag == 1 then
11478                                  * no additional timing information provided.
11479                                  * Default GTF, GTF Secondary curve and CVT are not
11480                                  * supported
11481                                  */
11482                                 if (range->flags != 1)
11483                                         continue;
11484
11485                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11486                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11487                                 amdgpu_dm_connector->pixel_clock_mhz =
11488                                         range->pixel_clock_mhz * 10;
11489
11490                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11491                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11492
11493                                 break;
11494                         }
11495
11496                         if (amdgpu_dm_connector->max_vfreq -
11497                             amdgpu_dm_connector->min_vfreq > 10) {
11498
11499                                 freesync_capable = true;
11500                         }
11501                 }
11502         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11503                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11504                 if (i >= 0 && vsdb_info.freesync_supported) {
11505                         timing  = &edid->detailed_timings[i];
11506                         data    = &timing->data.other_data;
11507
11508                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11509                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11510                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11511                                 freesync_capable = true;
11512
11513                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11514                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11515                 }
11516         }
11517
11518 update:
11519         if (dm_con_state)
11520                 dm_con_state->freesync_capable = freesync_capable;
11521
11522         if (connector->vrr_capable_property)
11523                 drm_connector_set_vrr_capable_property(connector,
11524                                                        freesync_capable);
11525 }
11526
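/**
 * amdgpu_dm_trigger_timing_sync() - Re-sync timing across active streams
 * @dev: The DRM device
 *
 * Propagates the force_timing_sync setting to every stream in the current
 * DC state and asks DC to retrigger CRTC synchronization, all under the DC
 * lock.
 */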
11527 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11528 {
11529         struct amdgpu_device *adev = drm_to_adev(dev);
11530         struct dc *dc = adev->dm.dc;
11531         int i;
11532
11533         mutex_lock(&adev->dm.dc_lock);
11534         if (dc->current_state) {
11535                 for (i = 0; i < dc->current_state->stream_count; ++i)
11536                         dc->current_state->streams[i]
11537                                 ->triggered_crtc_reset.enabled =
11538                                 adev->dm.force_timing_sync;
11539
11540                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11541                 dc_trigger_sync(dc, dc->current_state);
11542         }
11543         mutex_unlock(&adev->dm.dc_lock);
11544 }
11545
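/*
 * Register access helpers handed to DC: writes and reads go through CGS and
 * are traced for debugging. Reads bail out (with an assert) while a DMUB
 * register-offload gather is in progress, since the real register value is
 * not available yet.
 */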
11546 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11547                        uint32_t value, const char *func_name)
11548 {
11549 #ifdef DM_CHECK_ADDR_0
11550         if (address == 0) {
11551                 DC_ERR("invalid register write. address = 0\n");
11552                 return;
11553         }
11554 #endif
11555         cgs_write_register(ctx->cgs_device, address, value);
11556         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11557 }
11558
11559 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11560                           const char *func_name)
11561 {
11562         uint32_t value;
11563 #ifdef DM_CHECK_ADDR_0
11564         if (address == 0) {
11565                 DC_ERR("invalid register read; address = 0\n");
11566                 return 0;
11567         }
11568 #endif
11569
11570         if (ctx->dmub_srv &&
11571             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11572             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11573                 ASSERT(false);
11574                 return 0;
11575         }
11576
11577         value = cgs_read_register(ctx->cgs_device, address);
11578
11579         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11580
11581         return value;
11582 }
11583
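/*
 * Translate the DMUB async-to-sync completion status into the return value
 * and operation_result expected by the AUX and SET_CONFIG callers.
 */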
11584 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11585         uint8_t status_type, uint32_t *operation_result)
11586 {
11587         struct amdgpu_device *adev = ctx->driver_context;
11588         int return_status = -1;
11589         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11590
11591         if (is_cmd_aux) {
11592                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11593                         return_status = p_notify->aux_reply.length;
11594                         *operation_result = p_notify->result;
11595                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11596                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11597                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11598                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11599                 } else {
11600                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11601                 }
11602         } else {
11603                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11604                         return_status = 0;
11605                         *operation_result = p_notify->sc_status;
11606                 } else {
11607                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11608                 }
11609         }
11610
11611         return return_status;
11612 }
11613
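/*
 * Issue an AUX transfer or SET_CONFIG request through DMUB and wait (up to
 * 10 seconds) for the notification callback to complete it, turning the
 * asynchronous firmware interface into a synchronous call for DC. On a
 * successful AUX read, the reply data is copied back into the caller's
 * payload.
 */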
11614 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11615         unsigned int link_index, void *cmd_payload, void *operation_result)
11616 {
11617         struct amdgpu_device *adev = ctx->driver_context;
11618         int ret = 0;
11619
11620         if (is_cmd_aux) {
11621                 dc_process_dmub_aux_transfer_async(ctx->dc,
11622                         link_index, (struct aux_payload *)cmd_payload);
11623         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11624                                         (struct set_config_cmd_payload *)cmd_payload,
11625                                         adev->dm.dmub_notify)) {
11626                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11627                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11628                                         (uint32_t *)operation_result);
11629         }
11630
11631         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11632         if (ret == 0) {
11633                 DRM_ERROR("wait_for_completion_timeout timed out!\n");
11634                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11635                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11636                                 (uint32_t *)operation_result);
11637         }
11638
11639         if (is_cmd_aux) {
11640                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11641                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11642
11643                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11644                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11645                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11646                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11647                                        adev->dm.dmub_notify->aux_reply.length);
11648                         }
11649                 }
11650         }
11651
11652         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11653                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11654                         (uint32_t *)operation_result);
11655 }
11656
11657 /*
11658  * Check whether seamless boot is supported.
11659  *
11660  * So far we only support seamless boot on CHIP_VANGOGH.
11661  * If everything goes well, we may consider expanding
11662  * seamless boot to other ASICs.
11663  */
11664 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11665 {
11666         switch (adev->asic_type) {
11667         case CHIP_VANGOGH:
11668                 if (!adev->mman.keep_stolen_vga_memory)
11669                         return true;
11670                 break;
11671         default:
11672                 break;
11673         }
11674
11675         return false;
11676 }