/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
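
/*
 * Illustrative call flow (a rough sketch, not an exhaustive list): a
 * userspace atomic commit enters through the DRM atomic helpers, is
 * validated in amdgpu_dm_atomic_check(), and is programmed to hardware in
 * amdgpu_dm_atomic_commit_tail(), which hands dc_stream_state and
 * dc_plane_state objects to DC. Interrupts flow the other way: DC
 * interrupt sources are routed to handlers such as dm_crtc_high_irq()
 * and dm_pflip_high_irq() below, which translate them into DRM vblank
 * and page-flip events.
 */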

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, and drm_mode_config.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

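	/*
	 * Illustrative example (assumed values): with h_position = 100
	 * (0x64) and v_position = 50 (0x32), *position becomes 0x00640032,
	 * i.e. the horizontal position lands in the upper 16 bits and the
	 * vertical position in the lower 16 bits of the reconstructed
	 * register layout.
	 */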
	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the thread
 * that issued the AUX command, and signals the event to wake that
 * thread up.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Looks up the display index
 * through the link index and calls a helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing should be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else {
		return false;
	}

	return true;
}
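
/*
 * Typical usage (a sketch of how DM init is expected to wire this up):
 * register the HPD handler and request that it run from the offloaded
 * HPD work thread rather than directly in interrupt context, e.g.
 *
 *   register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                 dmub_hpd_callback, true);
 */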

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
						      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching DMUB notifications to
 * their registered callbacks and draining the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else {
			break;
		}

		count++;
	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

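	/*
	 * Size the buffer for the largest mode's full raster
	 * (htotal * vtotal) at 4 bytes per pixel.
	 */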
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

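	/*
	 * Layout of the DMCUB ucode array, as implied by the offsets above
	 * (a sketch; sizes come from the firmware header):
	 *
	 *   +----------------------------+ <- ucode_array_offset_bytes
	 *   | PSP header (0x100 bytes)   |
	 *   +----------------------------+ <- fw_inst_const
	 *   | instruction/constant data  |
	 *   +----------------------------+
	 *   | PSP footer (0x100 bytes)   |
	 *   +----------------------------+ <- fw_bss_data
	 *   | BSS/data region            |
	 *   +----------------------------+
	 */
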
	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
#endif
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev,
				      struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

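	/*
	 * Note on the shifts below (a sketch of the register encodings):
	 * system aperture addresses are expressed in 256KB units (>> 18),
	 * the AGP window in 16MB units (>> 24), and the GART page table
	 * addresses in 4KB pages (>> 12). The reverse shifts when filling
	 * pa_config reconstruct full byte addresses for DC.
	 */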
	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
		 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
		 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_free;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_free:
	/* Unwind any workqueues created before the failure. */
	while (--i >= 0)
		destroy_workqueue(hpd_rx_offload_wq[i].wq);
	kfree(hpd_rx_offload_wq);
	return NULL;
}
1358
1359 struct amdgpu_stutter_quirk {
1360         u16 chip_vendor;
1361         u16 chip_device;
1362         u16 subsys_vendor;
1363         u16 subsys_device;
1364         u8 revision;
1365 };
1366
1367 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1368         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1369         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1370         { 0, 0, 0, 0, 0 },
1371 };
1372
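/* Return true if the PCI device matches an entry in the stutter quirk list above. */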
1373 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1374 {
1375         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1376
1377         while (p && p->chip_device != 0) {
1378                 if (pdev->vendor == p->chip_vendor &&
1379                     pdev->device == p->chip_device &&
1380                     pdev->subsystem_vendor == p->subsys_vendor &&
1381                     pdev->subsystem_device == p->subsys_device &&
1382                     pdev->revision == p->revision) {
1383                         return true;
1384                 }
1385                 ++p;
1386         }
1387         return false;
1388 }
1389
1390 static int amdgpu_dm_init(struct amdgpu_device *adev)
1391 {
1392         struct dc_init_data init_data;
1393 #ifdef CONFIG_DRM_AMD_DC_HDCP
1394         struct dc_callback_init init_params;
1395 #endif
1396         int r;
1397
1398         adev->dm.ddev = adev_to_drm(adev);
1399         adev->dm.adev = adev;
1400
1401         /* Zero all the fields */
1402         memset(&init_data, 0, sizeof(init_data));
1403 #ifdef CONFIG_DRM_AMD_DC_HDCP
1404         memset(&init_params, 0, sizeof(init_params));
1405 #endif
1406
1407         mutex_init(&adev->dm.dc_lock);
1408         mutex_init(&adev->dm.audio_lock);
1409 #if defined(CONFIG_DRM_AMD_DC_DCN)
1410         spin_lock_init(&adev->dm.vblank_lock);
1411 #endif
1412
1413         if (amdgpu_dm_irq_init(adev)) {
1414                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1415                 goto error;
1416         }
1417
1418         init_data.asic_id.chip_family = adev->family;
1419
1420         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1421         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1422         init_data.asic_id.chip_id = adev->pdev->device;
1423
1424         init_data.asic_id.vram_width = adev->gmc.vram_width;
1425         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1426         init_data.asic_id.atombios_base_address =
1427                 adev->mode_info.atom_context->bios;
1428
1429         init_data.driver = adev;
1430
1431         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1432
1433         if (!adev->dm.cgs_device) {
1434                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1435                 goto error;
1436         }
1437
1438         init_data.cgs_device = adev->dm.cgs_device;
1439
1440         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1441
1442         switch (adev->asic_type) {
1443         case CHIP_CARRIZO:
1444         case CHIP_STONEY:
1445                 init_data.flags.gpu_vm_support = true;
1446                 break;
1447         default:
1448                 switch (adev->ip_versions[DCE_HWIP][0]) {
1449                 case IP_VERSION(2, 1, 0):
1450                         init_data.flags.gpu_vm_support = true;
1451                         switch (adev->dm.dmcub_fw_version) {
1452                         case 0: /* development */
1453                         case 0x1: /* linux-firmware.git hash 6d9f399 */
1454                         case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1455                                 init_data.flags.disable_dmcu = false;
1456                                 break;
1457                         default:
1458                                 init_data.flags.disable_dmcu = true;
1459                         }
1460                         break;
1461                 case IP_VERSION(1, 0, 0):
1462                 case IP_VERSION(1, 0, 1):
1463                 case IP_VERSION(3, 0, 1):
1464                 case IP_VERSION(3, 1, 2):
1465                 case IP_VERSION(3, 1, 3):
1466                         init_data.flags.gpu_vm_support = true;
1467                         break;
1468                 case IP_VERSION(2, 0, 3):
1469                         init_data.flags.disable_dmcu = true;
1470                         break;
1471                 default:
1472                         break;
1473                 }
1474                 break;
1475         }
1476
1477         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1478                 init_data.flags.fbc_support = true;
1479
1480         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1481                 init_data.flags.multi_mon_pp_mclk_switch = true;
1482
1483         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1484                 init_data.flags.disable_fractional_pwm = true;
1485
1486         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1487                 init_data.flags.edp_no_power_sequencing = true;
1488
1489 #ifdef CONFIG_DRM_AMD_DC_DCN
1490         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1491                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1492         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1493                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1494 #endif
1495
1496         init_data.flags.seamless_boot_edp_requested = false;
1497
1498         if (check_seamless_boot_capability(adev)) {
1499                 init_data.flags.seamless_boot_edp_requested = true;
1500                 init_data.flags.allow_seamless_boot_optimization = true;
1501                 DRM_INFO("Seamless boot condition check passed\n");
1502         }
1503
1504         INIT_LIST_HEAD(&adev->dm.da_list);
1505         /* Display Core create. */
1506         adev->dm.dc = dc_create(&init_data);
1507
1508         if (adev->dm.dc) {
1509                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1510         } else {
1511                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1512                 goto error;
1513         }
1514
1515         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1516                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1517                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1518         }
1519
1520         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1521                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1522         if (dm_should_disable_stutter(adev->pdev))
1523                 adev->dm.dc->debug.disable_stutter = true;
1524
1525         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1526                 adev->dm.dc->debug.disable_stutter = true;
1527
1528         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1529                 adev->dm.dc->debug.disable_dsc = true;
1530                 adev->dm.dc->debug.disable_dsc_edp = true;
1531         }
1532
1533         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1534                 adev->dm.dc->debug.disable_clock_gate = true;
1535
1536         r = dm_dmub_hw_init(adev);
1537         if (r) {
1538                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1539                 goto error;
1540         }
1541
1542         dc_hardware_init(adev->dm.dc);
1543
1544         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1545         if (!adev->dm.hpd_rx_offload_wq) {
1546                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1547                 goto error;
1548         }
1549
1550 #if defined(CONFIG_DRM_AMD_DC_DCN)
1551         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1552                 struct dc_phy_addr_space_config pa_config;
1553
1554                 mmhub_read_system_context(adev, &pa_config);
1555
1556                 // Call the DC init_memory func
1557                 dc_setup_system_context(adev->dm.dc, &pa_config);
1558         }
1559 #endif
1560
1561         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1562         if (!adev->dm.freesync_module) {
1563                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1564         } else {
1565                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1566                                 adev->dm.freesync_module);
1567         }
1568
1569         amdgpu_dm_init_color_mod();
1570
1571 #if defined(CONFIG_DRM_AMD_DC_DCN)
1572         if (adev->dm.dc->caps.max_links > 0) {
1573                 adev->dm.vblank_control_workqueue =
1574                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1575                 if (!adev->dm.vblank_control_workqueue)
1576                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1577         }
1578 #endif
1579
1580 #ifdef CONFIG_DRM_AMD_DC_HDCP
1581         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1582                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1583
1584                 if (!adev->dm.hdcp_workqueue)
1585                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1586                 else
1587                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1588
1589                 dc_init_callbacks(adev->dm.dc, &init_params);
1590         }
1591 #endif
1592 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1593         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1594 #endif
1595         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1596                 init_completion(&adev->dm.dmub_aux_transfer_done);
1597                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1598                 if (!adev->dm.dmub_notify) {
1599                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1600                         goto error;
1601                 }
1602
1603                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1604                 if (!adev->dm.delayed_hpd_wq) {
1605                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1606                         goto error;
1607                 }
1608
1609                 amdgpu_dm_outbox_init(adev);
1610 #if defined(CONFIG_DRM_AMD_DC_DCN)
1611                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1612                         dmub_aux_setconfig_callback, false)) {
1613                         DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1614                         goto error;
1615                 }
1616                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1617                         DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1618                         goto error;
1619                 }
1620                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1621                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1622                         goto error;
1623                 }
1624 #endif /* CONFIG_DRM_AMD_DC_DCN */
1625         }
1626
1627         if (amdgpu_dm_initialize_drm_device(adev)) {
1628                 DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1630                 goto error;
1631         }
1632
1633         /* create fake encoders for MST */
1634         dm_dp_create_fake_mst_encoders(adev);
1635
1636         /* TODO: Add_display_info? */
1637
1638         /* TODO use dynamic cursor width */
1639         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1640         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1641
1642         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1643                 DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1644                 goto error;
1645         }
1646
1649         DRM_DEBUG_DRIVER("KMS initialized.\n");
1650
1651         return 0;
1652 error:
1653         amdgpu_dm_fini(adev);
1654
1655         return -EINVAL;
1656 }
1657
1658 static int amdgpu_dm_early_fini(void *handle)
1659 {
1660         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1661
1662         amdgpu_dm_audio_fini(adev);
1663
1664         return 0;
1665 }
1666
1667 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1668 {
1669         int i;
1670
1671 #if defined(CONFIG_DRM_AMD_DC_DCN)
1672         if (adev->dm.vblank_control_workqueue) {
1673                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1674                 adev->dm.vblank_control_workqueue = NULL;
1675         }
1676 #endif
1677
1678         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1679                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1680         }
1681
1682         amdgpu_dm_destroy_drm_device(&adev->dm);
1683
1684 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1685         if (adev->dm.crc_rd_wrk) {
1686                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1687                 kfree(adev->dm.crc_rd_wrk);
1688                 adev->dm.crc_rd_wrk = NULL;
1689         }
1690 #endif
1691 #ifdef CONFIG_DRM_AMD_DC_HDCP
1692         if (adev->dm.hdcp_workqueue) {
1693                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1694                 adev->dm.hdcp_workqueue = NULL;
1695         }
1696
1697         if (adev->dm.dc)
1698                 dc_deinit_callbacks(adev->dm.dc);
1699 #endif
1700
1701         dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1702
1703         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1704                 kfree(adev->dm.dmub_notify);
1705                 adev->dm.dmub_notify = NULL;
1706                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1707                 adev->dm.delayed_hpd_wq = NULL;
1708         }
1709
1710         if (adev->dm.dmub_bo)
1711                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1712                                       &adev->dm.dmub_bo_gpu_addr,
1713                                       &adev->dm.dmub_bo_cpu_addr);
1714
1715         if (adev->dm.hpd_rx_offload_wq) {
1716                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1717                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1718                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1719                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1720                         }
1721                 }
1722
1723                 kfree(adev->dm.hpd_rx_offload_wq);
1724                 adev->dm.hpd_rx_offload_wq = NULL;
1725         }
1726
1727         /* DC Destroy TODO: Replace destroy DAL */
1728         if (adev->dm.dc)
1729                 dc_destroy(&adev->dm.dc);
1730         /*
1731          * TODO: pageflip, vblank interrupt
1732          *
1733          * amdgpu_dm_irq_fini(adev);
1734          */
1735
1736         if (adev->dm.cgs_device) {
1737                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1738                 adev->dm.cgs_device = NULL;
1739         }
1740         if (adev->dm.freesync_module) {
1741                 mod_freesync_destroy(adev->dm.freesync_module);
1742                 adev->dm.freesync_module = NULL;
1743         }
1744
1745         mutex_destroy(&adev->dm.audio_lock);
1746         mutex_destroy(&adev->dm.dc_lock);
1749 }
1750
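/*
 * Pick and load the DMCU firmware for ASICs that need it. Returns 0 without
 * loading anything on ASICs that have no separate DMCU firmware.
 */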
1751 static int load_dmcu_fw(struct amdgpu_device *adev)
1752 {
1753         const char *fw_name_dmcu = NULL;
1754         int r;
1755         const struct dmcu_firmware_header_v1_0 *hdr;
1756
1757         switch (adev->asic_type) {
1758 #if defined(CONFIG_DRM_AMD_DC_SI)
1759         case CHIP_TAHITI:
1760         case CHIP_PITCAIRN:
1761         case CHIP_VERDE:
1762         case CHIP_OLAND:
1763 #endif
1764         case CHIP_BONAIRE:
1765         case CHIP_HAWAII:
1766         case CHIP_KAVERI:
1767         case CHIP_KABINI:
1768         case CHIP_MULLINS:
1769         case CHIP_TONGA:
1770         case CHIP_FIJI:
1771         case CHIP_CARRIZO:
1772         case CHIP_STONEY:
1773         case CHIP_POLARIS11:
1774         case CHIP_POLARIS10:
1775         case CHIP_POLARIS12:
1776         case CHIP_VEGAM:
1777         case CHIP_VEGA10:
1778         case CHIP_VEGA12:
1779         case CHIP_VEGA20:
1780                 return 0;
1781         case CHIP_NAVI12:
1782                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1783                 break;
1784         case CHIP_RAVEN:
1785                 /* Picasso and Raven2 share the same DMCU firmware. */
1786                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1787                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1788                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1789                 else
1790                         return 0;
1791                 break;
1792         default:
1793                 switch (adev->ip_versions[DCE_HWIP][0]) {
1794                 case IP_VERSION(2, 0, 2):
1795                 case IP_VERSION(2, 0, 3):
1796                 case IP_VERSION(2, 0, 0):
1797                 case IP_VERSION(2, 1, 0):
1798                 case IP_VERSION(3, 0, 0):
1799                 case IP_VERSION(3, 0, 2):
1800                 case IP_VERSION(3, 0, 3):
1801                 case IP_VERSION(3, 0, 1):
1802                 case IP_VERSION(3, 1, 2):
1803                 case IP_VERSION(3, 1, 3):
1804                         return 0;
1805                 default:
1806                         break;
1807                 }
1808                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1809                 return -EINVAL;
1810         }
1811
1812         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1813                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1814                 return 0;
1815         }
1816
1817         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1818         if (r == -ENOENT) {
1819                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1820                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1821                 adev->dm.fw_dmcu = NULL;
1822                 return 0;
1823         }
1824         if (r) {
1825                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1826                         fw_name_dmcu);
1827                 return r;
1828         }
1829
1830         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1831         if (r) {
1832                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1833                         fw_name_dmcu);
1834                 release_firmware(adev->dm.fw_dmcu);
1835                 adev->dm.fw_dmcu = NULL;
1836                 return r;
1837         }
1838
1839         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1840         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1841         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1842         adev->firmware.fw_size +=
1843                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1844
1845         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1846         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1847         adev->firmware.fw_size +=
1848                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1849
1850         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1851
1852         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1853
1854         return 0;
1855 }
1856
1857 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1858 {
1859         struct amdgpu_device *adev = ctx;
1860
1861         return dm_read_reg(adev->dm.dc->ctx, address);
1862 }
1863
1864 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1865                                      uint32_t value)
1866 {
1867         struct amdgpu_device *adev = ctx;
1868
1869         return dm_write_reg(adev->dm.dc->ctx, address, value);
1870 }
1871
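/*
 * Software-side DMUB setup: load and validate the DMUB firmware for the
 * detected ASIC, create the DMUB service, calculate its region layout, and
 * back the regions with a VRAM allocation.
 */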
1872 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1873 {
1874         struct dmub_srv_create_params create_params;
1875         struct dmub_srv_region_params region_params;
1876         struct dmub_srv_region_info region_info;
1877         struct dmub_srv_fb_params fb_params;
1878         struct dmub_srv_fb_info *fb_info;
1879         struct dmub_srv *dmub_srv;
1880         const struct dmcub_firmware_header_v1_0 *hdr;
1881         const char *fw_name_dmub;
1882         enum dmub_asic dmub_asic;
1883         enum dmub_status status;
1884         int r;
1885
1886         switch (adev->ip_versions[DCE_HWIP][0]) {
1887         case IP_VERSION(2, 1, 0):
1888                 dmub_asic = DMUB_ASIC_DCN21;
1889                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1890                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1891                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1892                 break;
1893         case IP_VERSION(3, 0, 0):
1894                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1895                         dmub_asic = DMUB_ASIC_DCN30;
1896                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1897                 } else {
1898                         dmub_asic = DMUB_ASIC_DCN30;
1899                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1900                 }
1901                 break;
1902         case IP_VERSION(3, 0, 1):
1903                 dmub_asic = DMUB_ASIC_DCN301;
1904                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1905                 break;
1906         case IP_VERSION(3, 0, 2):
1907                 dmub_asic = DMUB_ASIC_DCN302;
1908                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1909                 break;
1910         case IP_VERSION(3, 0, 3):
1911                 dmub_asic = DMUB_ASIC_DCN303;
1912                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1913                 break;
1914         case IP_VERSION(3, 1, 2):
1915         case IP_VERSION(3, 1, 3):
1916                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1917                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1918                 break;
1919
1920         default:
1921                 /* ASIC doesn't support DMUB. */
1922                 return 0;
1923         }
1924
1925         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1926         if (r) {
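                /* Not fatal: continue without DMUB support. */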
1927                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1928                 return 0;
1929         }
1930
1931         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1932         if (r) {
1933                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1934                 return 0;
1935         }
1936
1937         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1938         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1939
1940         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1941                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1942                         AMDGPU_UCODE_ID_DMCUB;
1943                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1944                         adev->dm.dmub_fw;
1945                 adev->firmware.fw_size +=
1946                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1947
1948                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1949                          adev->dm.dmcub_fw_version);
1950         }
1951
1953         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1954         dmub_srv = adev->dm.dmub_srv;
1955
1956         if (!dmub_srv) {
1957                 DRM_ERROR("Failed to allocate DMUB service!\n");
1958                 return -ENOMEM;
1959         }
1960
1961         memset(&create_params, 0, sizeof(create_params));
1962         create_params.user_ctx = adev;
1963         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1964         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1965         create_params.asic = dmub_asic;
1966
1967         /* Create the DMUB service. */
1968         status = dmub_srv_create(dmub_srv, &create_params);
1969         if (status != DMUB_STATUS_OK) {
1970                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1971                 return -EINVAL;
1972         }
1973
1974         /* Calculate the size of all the regions for the DMUB service. */
1975         memset(&region_params, 0, sizeof(region_params));
1976
1977         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1978                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1979         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1980         region_params.vbios_size = adev->bios_size;
1981         region_params.fw_bss_data = region_params.bss_data_size ?
1982                 adev->dm.dmub_fw->data +
1983                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1984                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1985         region_params.fw_inst_const =
1986                 adev->dm.dmub_fw->data +
1987                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1988                 PSP_HEADER_BYTES;
1989
1990         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1991                                            &region_info);
1992
1993         if (status != DMUB_STATUS_OK) {
1994                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1995                 return -EINVAL;
1996         }
1997
1998         /*
1999          * Allocate a framebuffer based on the total size of all the regions.
2000          * TODO: Move this into GART.
2001          */
2002         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2003                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2004                                     &adev->dm.dmub_bo_gpu_addr,
2005                                     &adev->dm.dmub_bo_cpu_addr);
2006         if (r)
2007                 return r;
2008
2009         /* Rebase the regions on the framebuffer address. */
2010         memset(&fb_params, 0, sizeof(fb_params));
2011         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2012         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2013         fb_params.region_info = &region_info;
2014
2015         adev->dm.dmub_fb_info =
2016                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2017         fb_info = adev->dm.dmub_fb_info;
2018
2019         if (!fb_info) {
2020                 DRM_ERROR(
2021                         "Failed to allocate framebuffer info for DMUB service!\n");
2022                 return -ENOMEM;
2023         }
2024
2025         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2026         if (status != DMUB_STATUS_OK) {
2027                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2028                 return -EINVAL;
2029         }
2030
2031         return 0;
2032 }
2033
2034 static int dm_sw_init(void *handle)
2035 {
2036         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2037         int r;
2038
2039         r = dm_dmub_sw_init(adev);
2040         if (r)
2041                 return r;
2042
2043         return load_dmcu_fw(adev);
2044 }
2045
2046 static int dm_sw_fini(void *handle)
2047 {
2048         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2049
2050         kfree(adev->dm.dmub_fb_info);
2051         adev->dm.dmub_fb_info = NULL;
2052
2053         if (adev->dm.dmub_srv) {
2054                 dmub_srv_destroy(adev->dm.dmub_srv);
2055                 adev->dm.dmub_srv = NULL;
2056         }
2057
2058         release_firmware(adev->dm.dmub_fw);
2059         adev->dm.dmub_fw = NULL;
2060
2061         release_firmware(adev->dm.fw_dmcu);
2062         adev->dm.fw_dmcu = NULL;
2063
2064         return 0;
2065 }
2066
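/* Start topology management on every connector that has an MST branch link. */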
2067 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2068 {
2069         struct amdgpu_dm_connector *aconnector;
2070         struct drm_connector *connector;
2071         struct drm_connector_list_iter iter;
2072         int ret = 0;
2073
2074         drm_connector_list_iter_begin(dev, &iter);
2075         drm_for_each_connector_iter(connector, &iter) {
2076                 aconnector = to_amdgpu_dm_connector(connector);
2077                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2078                     aconnector->mst_mgr.aux) {
2079                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2080                                          aconnector,
2081                                          aconnector->base.base.id);
2082
2083                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2084                         if (ret < 0) {
2085                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2086                                 aconnector->dc_link->type =
2087                                         dc_connection_single;
2088                                 break;
2089                         }
2090                 }
2091         }
2092         drm_connector_list_iter_end(&iter);
2093
2094         return ret;
2095 }
2096
2097 static int dm_late_init(void *handle)
2098 {
2099         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2100         struct dmcu_iram_parameters params;
2101         unsigned int linear_lut[16];
2102         int i;
2103         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
2107
2108         for (i = 0; i < 16; i++)
2109                 linear_lut[i] = 0xFFFF * i / 15;
2110
2111         params.set = 0;
2112         params.backlight_ramping_override = false;
2113         params.backlight_ramping_start = 0xCCCC;
2114         params.backlight_ramping_reduction = 0xCCCCCCCC;
2115         params.backlight_lut_array_size = 16;
2116         params.backlight_lut_array = linear_lut;
2117
2118         /* Min backlight level after ABM reduction; don't allow below 1%:
2119          * 0xFFFF * 0.01 = 0x28F (65535 / 100 = 655 = 0x28F)
2120          */
2121         params.min_abm_backlight = 0x28F;
2122         /* In the case where ABM is implemented on dmcub,
2123          * the dmcu object will be NULL.
2124          * ABM 2.4 and up are implemented on dmcub.
2125          */
2126         if (dmcu) {
2127                 if (!dmcu_load_iram(dmcu, params))
2128                         return -EINVAL;
2129         } else if (adev->dm.dc->ctx->dmub_srv) {
2130                 struct dc_link *edp_links[MAX_NUM_EDP];
2131                 int edp_num;
2132
2133                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2134                 for (i = 0; i < edp_num; i++) {
2135                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2136                                 return -EINVAL;
2137                 }
2138         }
2139
2140         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2141 }
2142
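/*
 * Suspend or resume the MST topology managers across S3. If a manager fails
 * to resume, MST is torn down on that connector and a hotplug event is sent
 * so userspace can re-probe.
 */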
2143 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2144 {
2145         struct amdgpu_dm_connector *aconnector;
2146         struct drm_connector *connector;
2147         struct drm_connector_list_iter iter;
2148         struct drm_dp_mst_topology_mgr *mgr;
2149         int ret;
2150         bool need_hotplug = false;
2151
2152         drm_connector_list_iter_begin(dev, &iter);
2153         drm_for_each_connector_iter(connector, &iter) {
2154                 aconnector = to_amdgpu_dm_connector(connector);
2155                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2156                     aconnector->mst_port)
2157                         continue;
2158
2159                 mgr = &aconnector->mst_mgr;
2160
2161                 if (suspend) {
2162                         drm_dp_mst_topology_mgr_suspend(mgr);
2163                 } else {
2164                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2165                         if (ret < 0) {
2166                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2167                                 need_hotplug = true;
2168                         }
2169                 }
2170         }
2171         drm_connector_list_iter_end(&iter);
2172
2173         if (need_hotplug)
2174                 drm_kms_helper_hotplug_event(dev);
2175 }
2176
2177 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2178 {
2179         int ret = 0;
2180
2181         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2182          * depends on the Windows driver's dc implementation.
2183          * For Navi1x, the clock settings of dcn watermarks are fixed; the
2184          * settings should be passed to smu during boot up and on resume from s3.
2185          * Boot up: dc calculates the dcn watermark clock settings within
2186          * dc_create, dcn20_resource_construct,
2187          * then calls the pplib functions below to pass the settings to smu:
2188          * smu_set_watermarks_for_clock_ranges
2189          * smu_set_watermarks_table
2190          * navi10_set_watermarks_table
2191          * smu_write_watermarks_table
2192          *
2193          * For Renoir, the clock settings of dcn watermarks are also fixed
2194          * values. dc has implemented a different flow for the Windows driver:
2195          * dc_hardware_init / dc_set_power_state
2196          * dcn10_init_hw
2197          * notify_wm_ranges
2198          * set_wm_ranges
2199          * -- Linux
2200          * smu_set_watermarks_for_clock_ranges
2201          * renoir_set_watermarks_table
2202          * smu_write_watermarks_table
2203          *
2204          * For Linux,
2205          * dc_hardware_init -> amdgpu_dm_init
2206          * dc_set_power_state --> dm_resume
2207          *
2208          * Therefore, this function applies to navi10/12/14 but not to Renoir.
2209          */
2211         switch (adev->ip_versions[DCE_HWIP][0]) {
2212         case IP_VERSION(2, 0, 2):
2213         case IP_VERSION(2, 0, 0):
2214                 break;
2215         default:
2216                 return 0;
2217         }
2218
2219         ret = amdgpu_dpm_write_watermarks_table(adev);
2220         if (ret) {
2221                 DRM_ERROR("Failed to update WMTABLE!\n");
2222                 return ret;
2223         }
2224
2225         return 0;
2226 }
2227
2228 /**
2229  * dm_hw_init() - Initialize DC device
2230  * @handle: The base driver device containing the amdgpu_dm device.
2231  *
2232  * Initialize the &struct amdgpu_display_manager device. This involves calling
2233  * the initializers of each DM component, then populating the struct with them.
2234  *
2235  * Although the function implies hardware initialization, both hardware and
2236  * software are initialized here. Splitting them out to their relevant init
2237  * hooks is a future TODO item.
2238  *
2239  * Some notable things that are initialized here:
2240  *
2241  * - Display Core, both software and hardware
2242  * - DC modules that we need (freesync and color management)
2243  * - DRM software states
2244  * - Interrupt sources and handlers
2245  * - Vblank support
2246  * - Debug FS entries, if enabled
2247  */
2248 static int dm_hw_init(void *handle)
2249 {
2250         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2251         /* Create DAL display manager */
2252         amdgpu_dm_init(adev);
2253         amdgpu_dm_hpd_init(adev);
2254
2255         return 0;
2256 }
2257
2258 /**
2259  * dm_hw_fini() - Teardown DC device
2260  * @handle: The base driver device containing the amdgpu_dm device.
2261  *
2262  * Teardown components within &struct amdgpu_display_manager that require
2263  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2264  * were loaded. Also flush IRQ workqueues and disable them.
2265  */
2266 static int dm_hw_fini(void *handle)
2267 {
2268         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2269
2270         amdgpu_dm_hpd_fini(adev);
2271
2272         amdgpu_dm_irq_fini(adev);
2273         amdgpu_dm_fini(adev);
2274         return 0;
2275 }
2276
2278 static int dm_enable_vblank(struct drm_crtc *crtc);
2279 static void dm_disable_vblank(struct drm_crtc *crtc);
2280
2281 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2282                                  struct dc_state *state, bool enable)
2283 {
2284         enum dc_irq_source irq_source;
2285         struct amdgpu_crtc *acrtc;
2286         int rc = -EBUSY;
2287         int i = 0;
2288
2289         for (i = 0; i < state->stream_count; i++) {
2290                 acrtc = get_crtc_by_otg_inst(
2291                                 adev, state->stream_status[i].primary_otg_inst);
2292
2293                 if (acrtc && state->stream_status[i].plane_count != 0) {
2294                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2295                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2296                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2297                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2298                         if (rc)
2299                                 DRM_WARN("Failed to %s pflip interrupts\n",
2300                                          enable ? "enable" : "disable");
2301
2302                         if (enable) {
2303                                 rc = dm_enable_vblank(&acrtc->base);
2304                                 if (rc)
2305                                         DRM_WARN("Failed to enable vblank interrupts\n");
2306                         } else {
2307                                 dm_disable_vblank(&acrtc->base);
2308                         }
2309
2310                 }
2311         }
2312
2313 }
2314
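/*
 * Commit a copy of the current state with all planes and streams removed,
 * leaving the hardware with zero active streams (used in the suspend path
 * of a GPU reset).
 */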
2315 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2316 {
2317         struct dc_state *context = NULL;
2318         enum dc_status res = DC_ERROR_UNEXPECTED;
2319         int i;
2320         struct dc_stream_state *del_streams[MAX_PIPES];
2321         int del_streams_count = 0;
2322
2323         memset(del_streams, 0, sizeof(del_streams));
2324
2325         context = dc_create_state(dc);
2326         if (context == NULL)
2327                 goto context_alloc_fail;
2328
2329         dc_resource_state_copy_construct_current(dc, context);
2330
2331         /* First remove from context all streams */
2332         for (i = 0; i < context->stream_count; i++) {
2333                 struct dc_stream_state *stream = context->streams[i];
2334
2335                 del_streams[del_streams_count++] = stream;
2336         }
2337
2338         /* Remove all planes for removed streams and then remove the streams */
2339         for (i = 0; i < del_streams_count; i++) {
2340                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2341                         res = DC_FAIL_DETACH_SURFACES;
2342                         goto fail;
2343                 }
2344
2345                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2346                 if (res != DC_OK)
2347                         goto fail;
2348         }
2349
2350         res = dc_commit_state(dc, context);
2351
2352 fail:
2353         dc_release_state(context);
2354
2355 context_alloc_fail:
2356         return res;
2357 }
2358
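/* Flush any pending HPD RX offload work on every link before suspending. */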
2359 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2360 {
2361         int i;
2362
2363         if (dm->hpd_rx_offload_wq) {
2364                 for (i = 0; i < dm->dc->caps.max_links; i++)
2365                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2366         }
2367 }
2368
2369 static int dm_suspend(void *handle)
2370 {
2371         struct amdgpu_device *adev = handle;
2372         struct amdgpu_display_manager *dm = &adev->dm;
2373         int ret = 0;
2374
2375         if (amdgpu_in_reset(adev)) {
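                /*
                 * dc_lock is held across the GPU reset; the matching unlock
                 * is in the reset path of dm_resume().
                 */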
2376                 mutex_lock(&dm->dc_lock);
2377
2378 #if defined(CONFIG_DRM_AMD_DC_DCN)
2379                 dc_allow_idle_optimizations(adev->dm.dc, false);
2380 #endif
2381
2382                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2383
2384                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2385
2386                 amdgpu_dm_commit_zero_streams(dm->dc);
2387
2388                 amdgpu_dm_irq_suspend(adev);
2389
2390                 hpd_rx_irq_work_suspend(dm);
2391
2392                 return ret;
2393         }
2394
2395         WARN_ON(adev->dm.cached_state);
2396         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2397
2398         s3_handle_mst(adev_to_drm(adev), true);
2399
2400         amdgpu_dm_irq_suspend(adev);
2401
2402         hpd_rx_irq_work_suspend(dm);
2403
2404         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2405
2406         return 0;
2407 }
2408
2409 static struct amdgpu_dm_connector *
2410 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2411                                              struct drm_crtc *crtc)
2412 {
2413         uint32_t i;
2414         struct drm_connector_state *new_con_state;
2415         struct drm_connector *connector;
2416         struct drm_crtc *crtc_from_state;
2417
2418         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2419                 crtc_from_state = new_con_state->crtc;
2420
2421                 if (crtc_from_state == crtc)
2422                         return to_amdgpu_dm_connector(connector);
2423         }
2424
2425         return NULL;
2426 }
2427
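/*
 * Fake a successful detection for a forced connector: build a local sink
 * matching the connector signal and read the EDID from it.
 */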
2428 static void emulated_link_detect(struct dc_link *link)
2429 {
2430         struct dc_sink_init_data sink_init_data = { 0 };
2431         struct display_sink_capability sink_caps = { 0 };
2432         enum dc_edid_status edid_status;
2433         struct dc_context *dc_ctx = link->ctx;
2434         struct dc_sink *sink = NULL;
2435         struct dc_sink *prev_sink = NULL;
2436
2437         link->type = dc_connection_none;
2438         prev_sink = link->local_sink;
2439
2440         if (prev_sink)
2441                 dc_sink_release(prev_sink);
2442
2443         switch (link->connector_signal) {
2444         case SIGNAL_TYPE_HDMI_TYPE_A: {
2445                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2446                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2447                 break;
2448         }
2449
2450         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2451                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2452                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2453                 break;
2454         }
2455
2456         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2457                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2458                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2459                 break;
2460         }
2461
2462         case SIGNAL_TYPE_LVDS: {
2463                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2464                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2465                 break;
2466         }
2467
2468         case SIGNAL_TYPE_EDP: {
2469                 sink_caps.transaction_type =
2470                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2471                 sink_caps.signal = SIGNAL_TYPE_EDP;
2472                 break;
2473         }
2474
2475         case SIGNAL_TYPE_DISPLAY_PORT: {
2476                 sink_caps.transaction_type =
2477                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
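                /* Note: an emulated DP sink is reported as a virtual signal here. */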
2478                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2479                 break;
2480         }
2481
2482         default:
2483                 DC_ERROR("Invalid connector type! signal:%d\n",
2484                         link->connector_signal);
2485                 return;
2486         }
2487
2488         sink_init_data.link = link;
2489         sink_init_data.sink_signal = sink_caps.signal;
2490
2491         sink = dc_sink_create(&sink_init_data);
2492         if (!sink) {
2493                 DC_ERROR("Failed to create sink!\n");
2494                 return;
2495         }
2496
2497         /* dc_sink_create returns a new reference */
2498         link->local_sink = sink;
2499
2500         edid_status = dm_helpers_read_local_edid(
2501                         link->ctx,
2502                         link,
2503                         sink);
2504
2505         if (edid_status != EDID_OK)
2506                 DC_ERROR("Failed to read EDID");
2507
2508 }
2509
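/*
 * After a GPU reset, force a full update of every plane on every stream in
 * the cached state and re-commit them stream by stream.
 */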
2510 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2511                                      struct amdgpu_display_manager *dm)
2512 {
2513         struct {
2514                 struct dc_surface_update surface_updates[MAX_SURFACES];
2515                 struct dc_plane_info plane_infos[MAX_SURFACES];
2516                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2517                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2518                 struct dc_stream_update stream_update;
2519         } *bundle;
2520         int k, m;
2521
2522         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2523
2524         if (!bundle) {
2525                 dm_error("Failed to allocate update bundle\n");
2526                 goto cleanup;
2527         }
2528
2529         for (k = 0; k < dc_state->stream_count; k++) {
2530                 bundle->stream_update.stream = dc_state->streams[k];
2531
2532                 /* Index stream_status per stream rather than using entry 0. */
2533                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2534                         bundle->surface_updates[m].surface =
2535                                 dc_state->stream_status[k].plane_states[m];
2536                         bundle->surface_updates[m].surface->force_full_update = true;
2537                 }
2538                 dc_commit_updates_for_stream(
2539                         dm->dc, bundle->surface_updates,
2540                         dc_state->stream_status[k].plane_count,
2541                         dc_state->streams[k], &bundle->stream_update, dc_state);
2542         }
2543
2544 cleanup:
2545         kfree(bundle);
2546
2547         return;
2548 }
2549
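/* Force DPMS off on the stream currently driven by the given link. */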
2550 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2551 {
2552         struct dc_stream_state *stream_state;
2553         struct amdgpu_dm_connector *aconnector = link->priv;
2554         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2555         struct dc_stream_update stream_update;
2556         bool dpms_off = true;
2557
2558         memset(&stream_update, 0, sizeof(stream_update));
2559         stream_update.dpms_off = &dpms_off;
2560
2561         mutex_lock(&adev->dm.dc_lock);
2562         stream_state = dc_stream_find_from_link(link);
2563
2564         if (stream_state == NULL) {
2565                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2566                 mutex_unlock(&adev->dm.dc_lock);
2567                 return;
2568         }
2569
2570         stream_update.stream = stream_state;
2571         acrtc_state->force_dpms_off = true;
2572         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2573                                      stream_state, &stream_update,
2574                                      stream_state->ctx->dc->current_state);
2575         mutex_unlock(&adev->dm.dc_lock);
2576 }
2577
2578 static int dm_resume(void *handle)
2579 {
2580         struct amdgpu_device *adev = handle;
2581         struct drm_device *ddev = adev_to_drm(adev);
2582         struct amdgpu_display_manager *dm = &adev->dm;
2583         struct amdgpu_dm_connector *aconnector;
2584         struct drm_connector *connector;
2585         struct drm_connector_list_iter iter;
2586         struct drm_crtc *crtc;
2587         struct drm_crtc_state *new_crtc_state;
2588         struct dm_crtc_state *dm_new_crtc_state;
2589         struct drm_plane *plane;
2590         struct drm_plane_state *new_plane_state;
2591         struct dm_plane_state *dm_new_plane_state;
2592         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2593         enum dc_connection_type new_connection_type = dc_connection_none;
2594         struct dc_state *dc_state;
2595         int i, r, j;
2596
2597         if (amdgpu_in_reset(adev)) {
2598                 dc_state = dm->cached_dc_state;
2599
2600                 /*
2601                  * The dc->current_state is backed up into dm->cached_dc_state
2602                  * before we commit 0 streams.
2603                  *
2604                  * DC will clear link encoder assignments on the real state
2605                  * but the changes won't propagate over to the copy we made
2606                  * before the 0 streams commit.
2607                  *
2608                  * DC expects that link encoder assignments are *not* valid
2609                  * when committing a state, so as a workaround it needs to be
2610                  * cleared here.
2611                  */
2612                 link_enc_cfg_init(dm->dc, dc_state);
2613
2614                 if (dc_enable_dmub_notifications(adev->dm.dc))
2615                         amdgpu_dm_outbox_init(adev);
2616
2617                 r = dm_dmub_hw_init(adev);
2618                 if (r)
2619                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2620
2621                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2622                 dc_resume(dm->dc);
2623
2624                 amdgpu_dm_irq_resume_early(adev);
2625
2626                 for (i = 0; i < dc_state->stream_count; i++) {
2627                         dc_state->streams[i]->mode_changed = true;
2628                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2629                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2630                                         = 0xffffffff;
2631                         }
2632                 }
2633
2634                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2635
2636                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2637
2638                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2639
2640                 dc_release_state(dm->cached_dc_state);
2641                 dm->cached_dc_state = NULL;
2642
2643                 amdgpu_dm_irq_resume_late(adev);
2644
2645                 mutex_unlock(&dm->dc_lock);
2646
2647                 return 0;
2648         }
2649         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2650         dc_release_state(dm_state->context);
2651         dm_state->context = dc_create_state(dm->dc);
2652         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2653         dc_resource_state_construct(dm->dc, dm_state->context);
2654
2655         /* Re-enable outbox interrupts for DPIA. */
2656         if (dc_enable_dmub_notifications(adev->dm.dc))
2657                 amdgpu_dm_outbox_init(adev);
2658
2659         /* Before powering on DC we need to re-initialize DMUB. */
2660         dm_dmub_hw_resume(adev);
2661
2662         /* power on hardware */
2663         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2664
2665         /* program HPD filter */
2666         dc_resume(dm->dc);
2667
2668         /*
2669          * Enable the HPD Rx IRQ early; this must be done before modeset, as
2670          * short-pulse interrupts are used for MST.
2671          */
2672         amdgpu_dm_irq_resume_early(adev);
2673
2674         /* On resume we need to rewrite the MSTM control bits to enable MST */
2675         s3_handle_mst(ddev, false);
2676
2677         /* Do detection */
2678         drm_connector_list_iter_begin(ddev, &iter);
2679         drm_for_each_connector_iter(connector, &iter) {
2680                 aconnector = to_amdgpu_dm_connector(connector);
2681
2682                 /*
2683                  * This is the case when traversing through already created
2684                  * MST connectors; they should be skipped.
2685                  */
2686                 if (aconnector->mst_port)
2687                         continue;
2688
2689                 mutex_lock(&aconnector->hpd_lock);
2690                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2691                         DRM_ERROR("KMS: Failed to detect connector\n");
2692
2693                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2694                         emulated_link_detect(aconnector->dc_link);
2695                 else
2696                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2697
2698                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2699                         aconnector->fake_enable = false;
2700
2701                 if (aconnector->dc_sink)
2702                         dc_sink_release(aconnector->dc_sink);
2703                 aconnector->dc_sink = NULL;
2704                 amdgpu_dm_update_connector_after_detect(aconnector);
2705                 mutex_unlock(&aconnector->hpd_lock);
2706         }
2707         drm_connector_list_iter_end(&iter);
2708
2709         /* Force mode set in atomic commit */
2710         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2711                 new_crtc_state->active_changed = true;
2712
2713         /*
2714          * atomic_check is expected to create the dc states. We need to release
2715          * them here, since they were duplicated as part of the suspend
2716          * procedure.
2717          */
2718         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2719                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2720                 if (dm_new_crtc_state->stream) {
2721                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2722                         dc_stream_release(dm_new_crtc_state->stream);
2723                         dm_new_crtc_state->stream = NULL;
2724                 }
2725         }
2726
2727         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2728                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2729                 if (dm_new_plane_state->dc_state) {
2730                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2731                         dc_plane_state_release(dm_new_plane_state->dc_state);
2732                         dm_new_plane_state->dc_state = NULL;
2733                 }
2734         }
2735
2736         drm_atomic_helper_resume(ddev, dm->cached_state);
2737
2738         dm->cached_state = NULL;
2739
2740         amdgpu_dm_irq_resume_late(adev);
2741
2742         amdgpu_dm_smu_write_watermarks_table(adev);
2743
2744         return 0;
2745 }
2746
2747 /**
2748  * DOC: DM Lifecycle
2749  *
2750  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2751  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2752  * the base driver's device list to be initialized and torn down accordingly.
2753  *
2754  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2755  */
2756
2757 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2758         .name = "dm",
2759         .early_init = dm_early_init,
2760         .late_init = dm_late_init,
2761         .sw_init = dm_sw_init,
2762         .sw_fini = dm_sw_fini,
2763         .early_fini = amdgpu_dm_early_fini,
2764         .hw_init = dm_hw_init,
2765         .hw_fini = dm_hw_fini,
2766         .suspend = dm_suspend,
2767         .resume = dm_resume,
2768         .is_idle = dm_is_idle,
2769         .wait_for_idle = dm_wait_for_idle,
2770         .check_soft_reset = dm_check_soft_reset,
2771         .soft_reset = dm_soft_reset,
2772         .set_clockgating_state = dm_set_clockgating_state,
2773         .set_powergating_state = dm_set_powergating_state,
2774 };
2775
2776 const struct amdgpu_ip_block_version dm_ip_block =
2777 {
2778         .type = AMD_IP_BLOCK_TYPE_DCE,
2779         .major = 1,
2780         .minor = 0,
2781         .rev = 0,
2782         .funcs = &amdgpu_dm_funcs,
2783 };
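/*
 * Illustrative note: SoC-specific init code in the base driver (for
 * example soc15.c) picks this block up with a call along the lines of
 *
 *      amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which the amd_ip_funcs hooks above are invoked at the matching
 * points of the device lifecycle.
 */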
2784
2785
2786 /**
2787  * DOC: atomic
2788  *
2789  * *WIP*
2790  */
2791
2792 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2793         .fb_create = amdgpu_display_user_framebuffer_create,
2794         .get_format_info = amd_get_format_info,
2795         .output_poll_changed = drm_fb_helper_output_poll_changed,
2796         .atomic_check = amdgpu_dm_atomic_check,
2797         .atomic_commit = drm_atomic_helper_commit,
2798 };
2799
2800 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2801         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2802 };
2803
2804 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2805 {
2806         u32 max_cll, min_cll, max, min, q, r;
2807         struct amdgpu_dm_backlight_caps *caps;
2808         struct amdgpu_display_manager *dm;
2809         struct drm_connector *conn_base;
2810         struct amdgpu_device *adev;
2811         struct dc_link *link = NULL;
2812         static const u8 pre_computed_values[] = {
2813                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2814                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2815         int i;
2816
2817         if (!aconnector || !aconnector->dc_link)
2818                 return;
2819
2820         link = aconnector->dc_link;
2821         if (link->connector_signal != SIGNAL_TYPE_EDP)
2822                 return;
2823
2824         conn_base = &aconnector->base;
2825         adev = drm_to_adev(conn_base->dev);
2826         dm = &adev->dm;
2827         for (i = 0; i < dm->num_of_edps; i++) {
2828                 if (link == dm->backlight_link[i])
2829                         break;
2830         }
2831         if (i >= dm->num_of_edps)
2832                 return;
2833         caps = &dm->backlight_caps[i];
2834         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2835         caps->aux_support = false;
2836         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2837         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2838
2839         if (caps->ext_caps->bits.oled == 1 /*||
2840             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2841             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2842                 caps->aux_support = true;
2843
2844         if (amdgpu_backlight == 0)
2845                 caps->aux_support = false;
2846         else if (amdgpu_backlight == 1)
2847                 caps->aux_support = true;
2848
2849         /* From the specification (CTA-861-G), the maximum luminance is
2850          * calculated as:
2851          *      Luminance = 50*2**(CV/32)
2852          * where CV is a one-byte value.
2853          * Evaluating this expression directly would need floating-point
2854          * precision; to avoid that complexity, we take advantage of the fact
2855          * that CV is divided by a constant. By Euclid's division algorithm,
2856          * CV can be written as CV = 32*q + r. Substituting this into the
2857          * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2858          * to pre-compute 50*2**(r/32) for r in 0..31. The values were
2859          * generated with the following Ruby line:
2860          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2861          * and the results can be verified against the pre_computed_values
2862          * array above.
2863          */
2864         q = max_cll >> 5;
2865         r = max_cll % 32;
2866         max = (1 << q) * pre_computed_values[r];
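        /*
         * Worked example (illustrative): for max_cll = 65 we get q = 2 and
         * r = 1, so max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
         * matching round(50 * 2**(65/32.0)) = 204 nits.
         */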
2867
2868         /* min luminance: maxLum * (CV/255)^2 / 100, kept in integer math
2869          * so that small min_cll values are not rounded down to zero */
2870         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2871
2872         caps->aux_max_input_signal = max;
2873         caps->aux_min_input_signal = min;
2874 }
2875
2876 void amdgpu_dm_update_connector_after_detect(
2877                 struct amdgpu_dm_connector *aconnector)
2878 {
2879         struct drm_connector *connector = &aconnector->base;
2880         struct drm_device *dev = connector->dev;
2881         struct dc_sink *sink;
2882
2883         /* MST handled by drm_mst framework */
2884         if (aconnector->mst_mgr.mst_state)
2885                 return;
2886
2887         sink = aconnector->dc_link->local_sink;
2888         if (sink)
2889                 dc_sink_retain(sink);
2890
2891         /*
2892          * An EDID-managed connector gets its first update in the mode_valid
2893          * hook; the sink is then set to either the fake or the physical sink,
2894          * depending on link status. Skip if this was already done during boot.
2895          */
2896         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2897                         && aconnector->dc_em_sink) {
2898
2899                 /*
2900                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2901                  * fake a stream, because connector->sink is set to NULL on resume.
2902                  */
2903                 mutex_lock(&dev->mode_config.mutex);
2904
2905                 if (sink) {
2906                         if (aconnector->dc_sink) {
2907                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2908                                 /*
2909                                  * The retain and release below bump up the sink
2910                                  * refcount because the link no longer points to it
2911                                  * after disconnect; otherwise the next crtc-to-connector
2912                                  * reshuffle by the UMD would release the dc_sink unwantedly.
2913                                  */
2914                                 dc_sink_release(aconnector->dc_sink);
2915                         }
2916                         aconnector->dc_sink = sink;
2917                         dc_sink_retain(aconnector->dc_sink);
2918                         amdgpu_dm_update_freesync_caps(connector,
2919                                         aconnector->edid);
2920                 } else {
2921                         amdgpu_dm_update_freesync_caps(connector, NULL);
2922                         if (!aconnector->dc_sink) {
2923                                 aconnector->dc_sink = aconnector->dc_em_sink;
2924                                 dc_sink_retain(aconnector->dc_sink);
2925                         }
2926                 }
2927
2928                 mutex_unlock(&dev->mode_config.mutex);
2929
2930                 if (sink)
2931                         dc_sink_release(sink);
2932                 return;
2933         }
2934
2935         /*
2936          * TODO: temporary guard until a proper fix is found;
2937          * if this sink is an MST sink, we should not do anything.
2938          */
2939         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2940                 dc_sink_release(sink);
2941                 return;
2942         }
2943
2944         if (aconnector->dc_sink == sink) {
2945                 /*
2946                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2947                  * Do nothing!!
2948                  */
2949                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2950                                 aconnector->connector_id);
2951                 if (sink)
2952                         dc_sink_release(sink);
2953                 return;
2954         }
2955
2956         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2957                 aconnector->connector_id, aconnector->dc_sink, sink);
2958
2959         mutex_lock(&dev->mode_config.mutex);
2960
2961         /*
2962          * 1. Update status of the drm connector
2963          * 2. Send an event and let userspace tell us what to do
2964          */
2965         if (sink) {
2966                 /*
2967                  * TODO: check if we still need the S3 mode update workaround.
2968                  * If yes, put it here.
2969                  */
2970                 if (aconnector->dc_sink) {
2971                         amdgpu_dm_update_freesync_caps(connector, NULL);
2972                         dc_sink_release(aconnector->dc_sink);
2973                 }
2974
2975                 aconnector->dc_sink = sink;
2976                 dc_sink_retain(aconnector->dc_sink);
2977                 if (sink->dc_edid.length == 0) {
2978                         aconnector->edid = NULL;
2979                         if (aconnector->dc_link->aux_mode) {
2980                                 drm_dp_cec_unset_edid(
2981                                         &aconnector->dm_dp_aux.aux);
2982                         }
2983                 } else {
2984                         aconnector->edid =
2985                                 (struct edid *)sink->dc_edid.raw_edid;
2986
2987                         if (aconnector->dc_link->aux_mode)
2988                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2989                                                     aconnector->edid);
2990                 }
2991
2992                 drm_connector_update_edid_property(connector, aconnector->edid);
2993                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2994                 update_connector_ext_caps(aconnector);
2995         } else {
2996                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2997                 amdgpu_dm_update_freesync_caps(connector, NULL);
2998                 drm_connector_update_edid_property(connector, NULL);
2999                 aconnector->num_modes = 0;
3000                 dc_sink_release(aconnector->dc_sink);
3001                 aconnector->dc_sink = NULL;
3002                 aconnector->edid = NULL;
3003 #ifdef CONFIG_DRM_AMD_DC_HDCP
3004                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3005                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3006                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3007 #endif
3008         }
3009
3010         mutex_unlock(&dev->mode_config.mutex);
3011
3012         update_subconnector_property(aconnector);
3013
3014         if (sink)
3015                 dc_sink_release(sink);
3016 }
3017
3018 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3019 {
3020         struct drm_connector *connector = &aconnector->base;
3021         struct drm_device *dev = connector->dev;
3022         enum dc_connection_type new_connection_type = dc_connection_none;
3023         struct amdgpu_device *adev = drm_to_adev(dev);
3024         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3025         struct dm_crtc_state *dm_crtc_state = NULL;
3026
3027         if (adev->dm.disable_hpd_irq)
3028                 return;
3029
3030         if (dm_con_state->base.state && dm_con_state->base.crtc)
3031                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3032                                         dm_con_state->base.state,
3033                                         dm_con_state->base.crtc));
3034         /*
3035          * On failure, or for MST, there is no need to update the connector
3036          * status or notify the OS, since MST handles this in its own context.
3037          */
3038         mutex_lock(&aconnector->hpd_lock);
3039
3040 #ifdef CONFIG_DRM_AMD_DC_HDCP
3041         if (adev->dm.hdcp_workqueue) {
3042                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3043                 dm_con_state->update_hdcp = true;
3044         }
3045 #endif
3046         if (aconnector->fake_enable)
3047                 aconnector->fake_enable = false;
3048
3049         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3050                 DRM_ERROR("KMS: Failed to detect connector\n");
3051
3052         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3053                 emulated_link_detect(aconnector->dc_link);
3054
3055                 drm_modeset_lock_all(dev);
3056                 dm_restore_drm_connector_state(dev, connector);
3057                 drm_modeset_unlock_all(dev);
3058
3059                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3060                         drm_kms_helper_connector_hotplug_event(connector);
3061
3062         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3063                 if (new_connection_type == dc_connection_none &&
3064                     aconnector->dc_link->type == dc_connection_none &&
3065                     dm_crtc_state)
3066                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3067
3068                 amdgpu_dm_update_connector_after_detect(aconnector);
3069
3070                 drm_modeset_lock_all(dev);
3071                 dm_restore_drm_connector_state(dev, connector);
3072                 drm_modeset_unlock_all(dev);
3073
3074                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3075                         drm_kms_helper_connector_hotplug_event(connector);
3076         }
3077         mutex_unlock(&aconnector->hpd_lock);
3078
3079 }
3080
3081 static void handle_hpd_irq(void *param)
3082 {
3083         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3084
3085         handle_hpd_irq_helper(aconnector);
3086
3087 }
3088
3089 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3090 {
3091         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3092         uint8_t dret;
3093         bool new_irq_handled = false;
3094         int dpcd_addr;
3095         int dpcd_bytes_to_read;
3096
3097         const int max_process_count = 30;
3098         int process_count = 0;
3099
3100         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3101
3102         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3103                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3104                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3105                 dpcd_addr = DP_SINK_COUNT;
3106         } else {
3107                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3108                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3109                 dpcd_addr = DP_SINK_COUNT_ESI;
3110         }
3111
3112         dret = drm_dp_dpcd_read(
3113                 &aconnector->dm_dp_aux.aux,
3114                 dpcd_addr,
3115                 esi,
3116                 dpcd_bytes_to_read);
3117
3118         while (dret == dpcd_bytes_to_read &&
3119                 process_count < max_process_count) {
3120                 uint8_t retry;
3121                 dret = 0;
3122
3123                 process_count++;
3124
3125                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3126                 /* handle HPD short pulse irq */
3127                 if (aconnector->mst_mgr.mst_state)
3128                         drm_dp_mst_hpd_irq(
3129                                 &aconnector->mst_mgr,
3130                                 esi,
3131                                 &new_irq_handled);
3132
3133                 if (new_irq_handled) {
3134                         /* ACK at DPCD to notify the downstream device */
3135                         const int ack_dpcd_bytes_to_write =
3136                                 dpcd_bytes_to_read - 1;
3137
3138                         for (retry = 0; retry < 3; retry++) {
3139                                 uint8_t wret;
3140
3141                                 wret = drm_dp_dpcd_write(
3142                                         &aconnector->dm_dp_aux.aux,
3143                                         dpcd_addr + 1,
3144                                         &esi[1],
3145                                         ack_dpcd_bytes_to_write);
3146                                 if (wret == ack_dpcd_bytes_to_write)
3147                                         break;
3148                         }
3149
3150                         /* check if there is a new irq to be handled */
3151                         dret = drm_dp_dpcd_read(
3152                                 &aconnector->dm_dp_aux.aux,
3153                                 dpcd_addr,
3154                                 esi,
3155                                 dpcd_bytes_to_read);
3156
3157                         new_irq_handled = false;
3158                 } else {
3159                         break;
3160                 }
3161         }
3162
3163         if (process_count == max_process_count)
3164                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3165 }
3166
3167 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3168                                                         union hpd_irq_data hpd_irq_data)
3169 {
3170         struct hpd_rx_irq_offload_work *offload_work =
3171                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3172
3173         if (!offload_work) {
3174                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3175                 return;
3176         }
3177
3178         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3179         offload_work->data = hpd_irq_data;
3180         offload_work->offload_wq = offload_wq;
3181
3182         queue_work(offload_wq->wq, &offload_work->work);
3183         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3184 }
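/*
 * Each call above allocates a fresh work item, so several hpd_rx
 * interrupts can be queued independently; the handler
 * (dm_handle_hpd_rx_offload_work) is expected to free the item once it
 * has been processed.
 */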
3185
3186 static void handle_hpd_rx_irq(void *param)
3187 {
3188         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3189         struct drm_connector *connector = &aconnector->base;
3190         struct drm_device *dev = connector->dev;
3191         struct dc_link *dc_link = aconnector->dc_link;
3192         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3193         bool result = false;
3194         enum dc_connection_type new_connection_type = dc_connection_none;
3195         struct amdgpu_device *adev = drm_to_adev(dev);
3196         union hpd_irq_data hpd_irq_data;
3197         bool link_loss = false;
3198         bool has_left_work = false;
3199         int idx = aconnector->base.index;
3200         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3201
3202         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3203
3204         if (adev->dm.disable_hpd_irq)
3205                 return;
3206
3207         /*
3208          * TODO: Temporarily take a mutex so the HPD interrupt does not run
3209          * into a GPIO conflict; once the i2c helper is implemented, this
3210          * mutex should be retired.
3211          */
3212         mutex_lock(&aconnector->hpd_lock);
3213
3214         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3215                                                 &link_loss, true, &has_left_work);
3216
3217         if (!has_left_work)
3218                 goto out;
3219
3220         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3221                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3222                 goto out;
3223         }
3224
3225         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3226                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3227                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3228                         dm_handle_mst_sideband_msg(aconnector);
3229                         goto out;
3230                 }
3231
3232                 if (link_loss) {
3233                         bool skip = false;
3234
3235                         spin_lock(&offload_wq->offload_lock);
3236                         skip = offload_wq->is_handling_link_loss;
3237
3238                         if (!skip)
3239                                 offload_wq->is_handling_link_loss = true;
3240
3241                         spin_unlock(&offload_wq->offload_lock);
3242
3243                         if (!skip)
3244                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3245
3246                         goto out;
3247                 }
3248         }
3249
3250 out:
3251         if (result && !is_mst_root_connector) {
3252                 /* Downstream Port status changed. */
3253                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3254                         DRM_ERROR("KMS: Failed to detect connector\n");
3255
3256                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3257                         emulated_link_detect(dc_link);
3258
3259                         if (aconnector->fake_enable)
3260                                 aconnector->fake_enable = false;
3261
3262                         amdgpu_dm_update_connector_after_detect(aconnector);
3263
3264
3265                         drm_modeset_lock_all(dev);
3266                         dm_restore_drm_connector_state(dev, connector);
3267                         drm_modeset_unlock_all(dev);
3268
3269                         drm_kms_helper_connector_hotplug_event(connector);
3270                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3271
3272                         if (aconnector->fake_enable)
3273                                 aconnector->fake_enable = false;
3274
3275                         amdgpu_dm_update_connector_after_detect(aconnector);
3276
3277
3278                         drm_modeset_lock_all(dev);
3279                         dm_restore_drm_connector_state(dev, connector);
3280                         drm_modeset_unlock_all(dev);
3281
3282                         drm_kms_helper_connector_hotplug_event(connector);
3283                 }
3284         }
3285 #ifdef CONFIG_DRM_AMD_DC_HDCP
3286         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3287                 if (adev->dm.hdcp_workqueue)
3288                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3289         }
3290 #endif
3291
3292         if (dc_link->type != dc_connection_mst_branch)
3293                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3294
3295         mutex_unlock(&aconnector->hpd_lock);
3296 }
3297
3298 static void register_hpd_handlers(struct amdgpu_device *adev)
3299 {
3300         struct drm_device *dev = adev_to_drm(adev);
3301         struct drm_connector *connector;
3302         struct amdgpu_dm_connector *aconnector;
3303         const struct dc_link *dc_link;
3304         struct dc_interrupt_params int_params = {0};
3305
3306         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3307         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3308
3309         list_for_each_entry(connector,
3310                         &dev->mode_config.connector_list, head) {
3311
3312                 aconnector = to_amdgpu_dm_connector(connector);
3313                 dc_link = aconnector->dc_link;
3314
3315                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3316                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3317                         int_params.irq_source = dc_link->irq_source_hpd;
3318
3319                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3320                                         handle_hpd_irq,
3321                                         (void *) aconnector);
3322                 }
3323
3324                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3325
3326                         /* Also register for DP short pulse (hpd_rx). */
3327                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3328                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3329
3330                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3331                                         handle_hpd_rx_irq,
3332                                         (void *) aconnector);
3333
3334                         if (adev->dm.hpd_rx_offload_wq)
3335                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3336                                         aconnector;
3337                 }
3338         }
3339 }
3340
3341 #if defined(CONFIG_DRM_AMD_DC_SI)
3342 /* Register IRQ sources and initialize IRQ callbacks */
3343 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3344 {
3345         struct dc *dc = adev->dm.dc;
3346         struct common_irq_params *c_irq_params;
3347         struct dc_interrupt_params int_params = {0};
3348         int r;
3349         int i;
3350         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3351
3352         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3353         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3354
3355         /*
3356          * Actions of amdgpu_irq_add_id():
3357          * 1. Register a set() function with base driver.
3358          *    Base driver will call set() function to enable/disable an
3359          *    interrupt in DC hardware.
3360          * 2. Register amdgpu_dm_irq_handler().
3361          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3362          *    coming from DC hardware.
3363          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3364          *    for acknowledging and handling. */
3365
3366         /* Use VBLANK interrupt */
3367         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3368                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3369                 if (r) {
3370                         DRM_ERROR("Failed to add crtc irq id!\n");
3371                         return r;
3372                 }
3373
3374                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3375                 int_params.irq_source =
3376                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3377
3378                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3379
3380                 c_irq_params->adev = adev;
3381                 c_irq_params->irq_src = int_params.irq_source;
3382
3383                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3384                                 dm_crtc_high_irq, c_irq_params);
3385         }
3386
3387         /* Use GRPH_PFLIP interrupt */
3388         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3389                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3390                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3391                 if (r) {
3392                         DRM_ERROR("Failed to add page flip irq id!\n");
3393                         return r;
3394                 }
3395
3396                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3397                 int_params.irq_source =
3398                         dc_interrupt_to_irq_source(dc, i, 0);
3399
3400                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3401
3402                 c_irq_params->adev = adev;
3403                 c_irq_params->irq_src = int_params.irq_source;
3404
3405                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3406                                 dm_pflip_high_irq, c_irq_params);
3407
3408         }
3409
3410         /* HPD */
3411         r = amdgpu_irq_add_id(adev, client_id,
3412                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3413         if (r) {
3414                 DRM_ERROR("Failed to add hpd irq id!\n");
3415                 return r;
3416         }
3417
3418         register_hpd_handlers(adev);
3419
3420         return 0;
3421 }
3422 #endif
3423
3424 /* Register IRQ sources and initialize IRQ callbacks */
3425 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3426 {
3427         struct dc *dc = adev->dm.dc;
3428         struct common_irq_params *c_irq_params;
3429         struct dc_interrupt_params int_params = {0};
3430         int r;
3431         int i;
3432         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3433
3434         if (adev->family >= AMDGPU_FAMILY_AI)
3435                 client_id = SOC15_IH_CLIENTID_DCE;
3436
3437         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3438         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3439
3440         /*
3441          * Actions of amdgpu_irq_add_id():
3442          * 1. Register a set() function with base driver.
3443          *    Base driver will call set() function to enable/disable an
3444          *    interrupt in DC hardware.
3445          * 2. Register amdgpu_dm_irq_handler().
3446          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3447          *    coming from DC hardware.
3448          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3449          *    for acknowledging and handling. */
3450
3451         /* Use VBLANK interrupt */
3452         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3453                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3454                 if (r) {
3455                         DRM_ERROR("Failed to add crtc irq id!\n");
3456                         return r;
3457                 }
3458
3459                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3460                 int_params.irq_source =
3461                         dc_interrupt_to_irq_source(dc, i, 0);
3462
3463                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3464
3465                 c_irq_params->adev = adev;
3466                 c_irq_params->irq_src = int_params.irq_source;
3467
3468                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3469                                 dm_crtc_high_irq, c_irq_params);
3470         }
3471
3472         /* Use VUPDATE interrupt */
3473         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3474                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3475                 if (r) {
3476                         DRM_ERROR("Failed to add vupdate irq id!\n");
3477                         return r;
3478                 }
3479
3480                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3481                 int_params.irq_source =
3482                         dc_interrupt_to_irq_source(dc, i, 0);
3483
3484                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3485
3486                 c_irq_params->adev = adev;
3487                 c_irq_params->irq_src = int_params.irq_source;
3488
3489                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3490                                 dm_vupdate_high_irq, c_irq_params);
3491         }
3492
3493         /* Use GRPH_PFLIP interrupt */
3494         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3495                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3496                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3497                 if (r) {
3498                         DRM_ERROR("Failed to add page flip irq id!\n");
3499                         return r;
3500                 }
3501
3502                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3503                 int_params.irq_source =
3504                         dc_interrupt_to_irq_source(dc, i, 0);
3505
3506                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3507
3508                 c_irq_params->adev = adev;
3509                 c_irq_params->irq_src = int_params.irq_source;
3510
3511                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3512                                 dm_pflip_high_irq, c_irq_params);
3513
3514         }
3515
3516         /* HPD */
3517         r = amdgpu_irq_add_id(adev, client_id,
3518                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3519         if (r) {
3520                 DRM_ERROR("Failed to add hpd irq id!\n");
3521                 return r;
3522         }
3523
3524         register_hpd_handlers(adev);
3525
3526         return 0;
3527 }
3528
3529 #if defined(CONFIG_DRM_AMD_DC_DCN)
3530 /* Register IRQ sources and initialize IRQ callbacks */
3531 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3532 {
3533         struct dc *dc = adev->dm.dc;
3534         struct common_irq_params *c_irq_params;
3535         struct dc_interrupt_params int_params = {0};
3536         int r;
3537         int i;
3538 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3539         static const unsigned int vrtl_int_srcid[] = {
3540                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3541                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3542                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3543                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3544                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3545                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3546         };
3547 #endif
3548
3549         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3550         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3551
3552         /*
3553          * Actions of amdgpu_irq_add_id():
3554          * 1. Register a set() function with base driver.
3555          *    Base driver will call set() function to enable/disable an
3556          *    interrupt in DC hardware.
3557          * 2. Register amdgpu_dm_irq_handler().
3558          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3559          *    coming from DC hardware.
3560          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3561          *    for acknowledging and handling.
3562          */
3563
3564         /* Use VSTARTUP interrupt */
3565         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3566                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3567                         i++) {
3568                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3569
3570                 if (r) {
3571                         DRM_ERROR("Failed to add crtc irq id!\n");
3572                         return r;
3573                 }
3574
3575                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3576                 int_params.irq_source =
3577                         dc_interrupt_to_irq_source(dc, i, 0);
3578
3579                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3580
3581                 c_irq_params->adev = adev;
3582                 c_irq_params->irq_src = int_params.irq_source;
3583
3584                 amdgpu_dm_irq_register_interrupt(
3585                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3586         }
3587
3588         /* Use otg vertical line interrupt */
3589 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3590         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3591                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3592                                 vrtl_int_srcid[i], &adev->vline0_irq);
3593
3594                 if (r) {
3595                         DRM_ERROR("Failed to add vline0 irq id!\n");
3596                         return r;
3597                 }
3598
3599                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3600                 int_params.irq_source =
3601                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3602
3603                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3604                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3605                         break;
3606                 }
3607
3608                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3609                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3610
3611                 c_irq_params->adev = adev;
3612                 c_irq_params->irq_src = int_params.irq_source;
3613
3614                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3615                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3616         }
3617 #endif
3618
3619         /* Use the VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond
3620          * to the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3621          * to trigger at the end of each vblank, regardless of the state of the
3622          * lock, matching DCE behaviour.
3623          */
3624         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3625              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3626              i++) {
3627                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3628
3629                 if (r) {
3630                         DRM_ERROR("Failed to add vupdate irq id!\n");
3631                         return r;
3632                 }
3633
3634                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3635                 int_params.irq_source =
3636                         dc_interrupt_to_irq_source(dc, i, 0);
3637
3638                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3639
3640                 c_irq_params->adev = adev;
3641                 c_irq_params->irq_src = int_params.irq_source;
3642
3643                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3644                                 dm_vupdate_high_irq, c_irq_params);
3645         }
3646
3647         /* Use GRPH_PFLIP interrupt */
3648         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3649                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3650                         i++) {
3651                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3652                 if (r) {
3653                         DRM_ERROR("Failed to add page flip irq id!\n");
3654                         return r;
3655                 }
3656
3657                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3658                 int_params.irq_source =
3659                         dc_interrupt_to_irq_source(dc, i, 0);
3660
3661                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3662
3663                 c_irq_params->adev = adev;
3664                 c_irq_params->irq_src = int_params.irq_source;
3665
3666                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3667                                 dm_pflip_high_irq, c_irq_params);
3668
3669         }
3670
3671         /* HPD */
3672         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3673                         &adev->hpd_irq);
3674         if (r) {
3675                 DRM_ERROR("Failed to add hpd irq id!\n");
3676                 return r;
3677         }
3678
3679         register_hpd_handlers(adev);
3680
3681         return 0;
3682 }
3683 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3684 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3685 {
3686         struct dc *dc = adev->dm.dc;
3687         struct common_irq_params *c_irq_params;
3688         struct dc_interrupt_params int_params = {0};
3689         int r, i;
3690
3691         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3692         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3693
3694         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3695                         &adev->dmub_outbox_irq);
3696         if (r) {
3697                 DRM_ERROR("Failed to add outbox irq id!\n");
3698                 return r;
3699         }
3700
3701         if (dc->ctx->dmub_srv) {
3702                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3703                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3704                 int_params.irq_source =
3705                         dc_interrupt_to_irq_source(dc, i, 0);
3706
3707                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3708
3709                 c_irq_params->adev = adev;
3710                 c_irq_params->irq_src = int_params.irq_source;
3711
3712                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3713                                 dm_dmub_outbox1_low_irq, c_irq_params);
3714         }
3715
3716         return 0;
3717 }
3718 #endif
3719
3720 /*
3721  * Acquires the lock for the atomic state object and returns
3722  * the new atomic state.
3723  *
3724  * This should only be called during atomic check.
3725  */
3726 static int dm_atomic_get_state(struct drm_atomic_state *state,
3727                                struct dm_atomic_state **dm_state)
3728 {
3729         struct drm_device *dev = state->dev;
3730         struct amdgpu_device *adev = drm_to_adev(dev);
3731         struct amdgpu_display_manager *dm = &adev->dm;
3732         struct drm_private_state *priv_state;
3733
3734         if (*dm_state)
3735                 return 0;
3736
3737         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3738         if (IS_ERR(priv_state))
3739                 return PTR_ERR(priv_state);
3740
3741         *dm_state = to_dm_atomic_state(priv_state);
3742
3743         return 0;
3744 }
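/*
 * Illustrative usage sketch: callers inside atomic check typically do
 *
 *      struct dm_atomic_state *dm_state = NULL;
 *      ...
 *      ret = dm_atomic_get_state(state, &dm_state);
 *      if (ret)
 *              return ret;
 *
 * where the first call takes the private-obj lock of dm->atomic_obj for
 * the rest of the atomic check.
 */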
3745
3746 static struct dm_atomic_state *
3747 dm_atomic_get_new_state(struct drm_atomic_state *state)
3748 {
3749         struct drm_device *dev = state->dev;
3750         struct amdgpu_device *adev = drm_to_adev(dev);
3751         struct amdgpu_display_manager *dm = &adev->dm;
3752         struct drm_private_obj *obj;
3753         struct drm_private_state *new_obj_state;
3754         int i;
3755
3756         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3757                 if (obj->funcs == dm->atomic_obj.funcs)
3758                         return to_dm_atomic_state(new_obj_state);
3759         }
3760
3761         return NULL;
3762 }
3763
3764 static struct drm_private_state *
3765 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3766 {
3767         struct dm_atomic_state *old_state, *new_state;
3768
3769         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3770         if (!new_state)
3771                 return NULL;
3772
3773         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3774
3775         old_state = to_dm_atomic_state(obj->state);
3776
3777         if (old_state && old_state->context)
3778                 new_state->context = dc_copy_state(old_state->context);
3779
3780         if (!new_state->context) {
3781                 kfree(new_state);
3782                 return NULL;
3783         }
3784
3785         return &new_state->base;
3786 }
3787
3788 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3789                                     struct drm_private_state *state)
3790 {
3791         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3792
3793         if (dm_state && dm_state->context)
3794                 dc_release_state(dm_state->context);
3795
3796         kfree(dm_state);
3797 }
3798
3799 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3800         .atomic_duplicate_state = dm_atomic_duplicate_state,
3801         .atomic_destroy_state = dm_atomic_destroy_state,
3802 };
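/*
 * These callbacks are driven by the DRM private-obj helpers:
 * atomic_duplicate_state runs when dm_atomic_get_state() first pulls
 * dm->atomic_obj into an atomic state, and atomic_destroy_state runs
 * when that atomic state is cleared or freed.
 */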
3803
3804 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3805 {
3806         struct dm_atomic_state *state;
3807         int r;
3808
3809         adev->mode_info.mode_config_initialized = true;
3810
3811         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3812         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3813
3814         adev_to_drm(adev)->mode_config.max_width = 16384;
3815         adev_to_drm(adev)->mode_config.max_height = 16384;
3816
3817         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3818         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3819         /* indicates support for immediate flip */
3820         adev_to_drm(adev)->mode_config.async_page_flip = true;
3821
3822         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3823
3824         state = kzalloc(sizeof(*state), GFP_KERNEL);
3825         if (!state)
3826                 return -ENOMEM;
3827
3828         state->context = dc_create_state(adev->dm.dc);
3829         if (!state->context) {
3830                 kfree(state);
3831                 return -ENOMEM;
3832         }
3833
3834         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3835
3836         drm_atomic_private_obj_init(adev_to_drm(adev),
3837                                     &adev->dm.atomic_obj,
3838                                     &state->base,
3839                                     &dm_atomic_state_funcs);
3840
3841         r = amdgpu_display_modeset_create_props(adev);
3842         if (r) {
3843                 dc_release_state(state->context);
3844                 kfree(state);
3845                 return r;
3846         }
3847
3848         r = amdgpu_dm_audio_init(adev);
3849         if (r) {
3850                 dc_release_state(state->context);
3851                 kfree(state);
3852                 return r;
3853         }
3854
3855         return 0;
3856 }
3857
3858 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3859 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3860 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3861
3862 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3863         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3864
3865 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3866                                             int bl_idx)
3867 {
3868 #if defined(CONFIG_ACPI)
3869         struct amdgpu_dm_backlight_caps caps;
3870
3871         memset(&caps, 0, sizeof(caps));
3872
3873         if (dm->backlight_caps[bl_idx].caps_valid)
3874                 return;
3875
3876         amdgpu_acpi_get_backlight_caps(&caps);
3877         if (caps.caps_valid) {
3878                 dm->backlight_caps[bl_idx].caps_valid = true;
3879                 if (caps.aux_support)
3880                         return;
3881                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3882                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3883         } else {
3884                 dm->backlight_caps[bl_idx].min_input_signal =
3885                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3886                 dm->backlight_caps[bl_idx].max_input_signal =
3887                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3888         }
3889 #else
3890         if (dm->backlight_caps[bl_idx].aux_support)
3891                 return;
3892
3893         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3894         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3895 #endif
3896 }
3897
3898 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3899                                 unsigned *min, unsigned *max)
3900 {
3901         if (!caps)
3902                 return 0;
3903
3904         if (caps->aux_support) {
3905                 // Firmware limits are in nits, DC API wants millinits.
3906                 *max = 1000 * caps->aux_max_input_signal;
3907                 *min = 1000 * caps->aux_min_input_signal;
3908         } else {
3909                 // Firmware limits are 8-bit, PWM control is 16-bit.
3910                 *max = 0x101 * caps->max_input_signal;
3911                 *min = 0x101 * caps->min_input_signal;
3912         }
3913         return 1;
3914 }
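/*
 * Note on the 0x101 factor above: multiplying an 8-bit value by 0x101
 * replicates it into both bytes of a 16-bit word (0xAB * 0x101 = 0xABAB),
 * so the firmware maximum of 255 maps exactly to the PWM maximum 0xFFFF.
 */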
3915
3916 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3917                                         uint32_t brightness)
3918 {
3919         unsigned min, max;
3920
3921         if (!get_brightness_range(caps, &min, &max))
3922                 return brightness;
3923
3924         // Rescale 0..255 to min..max
3925         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3926                                        AMDGPU_MAX_BL_LEVEL);
3927 }
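/*
 * Worked example (illustrative, assuming the PWM defaults of
 * min_input_signal = 12 and max_input_signal = 255): the range becomes
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to 3084 + (65535 - 3084) * 128 / 255 = 34432
 * with DIV_ROUND_CLOSEST rounding.
 */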
3928
3929 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3930                                       uint32_t brightness)
3931 {
3932         unsigned min, max;
3933
3934         if (!get_brightness_range(caps, &min, &max))
3935                 return brightness;
3936
3937         if (brightness < min)
3938                 return 0;
3939         // Rescale min..max to 0..255
3940         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3941                                  max - min);
3942 }
3943
3944 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3945                                          int bl_idx,
3946                                          u32 user_brightness)
3947 {
3948         struct amdgpu_dm_backlight_caps caps;
3949         struct dc_link *link;
3950         u32 brightness;
3951         bool rc;
3952
3953         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3954         caps = dm->backlight_caps[bl_idx];
3955
3956         dm->brightness[bl_idx] = user_brightness;
3957         /* update scratch register */
3958         if (bl_idx == 0)
3959                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3960         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3961         link = (struct dc_link *)dm->backlight_link[bl_idx];
3962
3963         /* Change brightness based on AUX property */
3964         if (caps.aux_support) {
3965                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3966                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3967                 if (!rc)
3968                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3969         } else {
3970                 rc = dc_link_set_backlight_level(link, brightness, 0);
3971                 if (!rc)
3972                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3973         }
3974
3975         return rc ? 0 : 1;
3976 }
3977
3978 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3979 {
3980         struct amdgpu_display_manager *dm = bl_get_data(bd);
3981         int i;
3982
3983         for (i = 0; i < dm->num_of_edps; i++) {
3984                 if (bd == dm->backlight_dev[i])
3985                         break;
3986         }
3987         if (i >= AMDGPU_DM_MAX_NUM_EDP)
3988                 i = 0;
3989         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3990
3991         return 0;
3992 }
3993
3994 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3995                                          int bl_idx)
3996 {
3997         struct amdgpu_dm_backlight_caps caps;
3998         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3999
4000         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4001         caps = dm->backlight_caps[bl_idx];
4002
4003         if (caps.aux_support) {
4004                 u32 avg, peak;
4005                 bool rc;
4006
4007                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4008                 if (!rc)
4009                         return dm->brightness[bl_idx];
4010                 return convert_brightness_to_user(&caps, avg);
4011         } else {
4012                 int ret = dc_link_get_backlight_level(link);
4013
4014                 if (ret == DC_ERROR_UNEXPECTED)
4015                         return dm->brightness[bl_idx];
4016                 return convert_brightness_to_user(&caps, ret);
4017         }
4018 }
4019
4020 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4021 {
4022         struct amdgpu_display_manager *dm = bl_get_data(bd);
4023         int i;
4024
4025         for (i = 0; i < dm->num_of_edps; i++) {
4026                 if (bd == dm->backlight_dev[i])
4027                         break;
4028         }
4029         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4030                 i = 0;
4031         return amdgpu_dm_backlight_get_level(dm, i);
4032 }
4033
4034 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4035         .options = BL_CORE_SUSPENDRESUME,
4036         .get_brightness = amdgpu_dm_backlight_get_brightness,
4037         .update_status  = amdgpu_dm_backlight_update_status,
4038 };
4039
4040 static void
4041 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4042 {
4043         char bl_name[16];
4044         struct backlight_properties props = { 0 };
4045
4046         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4047         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4048
4049         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4050         props.brightness = AMDGPU_MAX_BL_LEVEL;
4051         props.type = BACKLIGHT_RAW;
4052
4053         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4054                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4055
4056         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4057                                                                        adev_to_drm(dm->adev)->dev,
4058                                                                        dm,
4059                                                                        &amdgpu_dm_backlight_ops,
4060                                                                        &props);
4061
4062         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4063                 DRM_ERROR("DM: Backlight registration failed!\n");
4064         else
4065                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4066 }
4067 #endif
4068
4069 static int initialize_plane(struct amdgpu_display_manager *dm,
4070                             struct amdgpu_mode_info *mode_info, int plane_id,
4071                             enum drm_plane_type plane_type,
4072                             const struct dc_plane_cap *plane_cap)
4073 {
4074         struct drm_plane *plane;
4075         unsigned long possible_crtcs;
4076         int ret = 0;
4077
4078         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4079         if (!plane) {
4080                 DRM_ERROR("KMS: Failed to allocate plane\n");
4081                 return -ENOMEM;
4082         }
4083         plane->type = plane_type;
4084
4085         /*
4086          * HACK: IGT tests expect that the primary plane for a CRTC
4087          * can only have one possible CRTC. Only expose support for
4088          * any CRTC on planes that will not be used as a primary plane
4089          * for a CRTC, such as overlay or underlay planes.
4090          */
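        /*
         * For example (illustrative): plane_id 0 advertises
         * possible_crtcs = 0x1 (CRTC 0 only), while an overlay plane with
         * plane_id >= max_streams advertises 0xff, i.e. any of up to
         * eight CRTCs.
         */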
4091         possible_crtcs = 1 << plane_id;
4092         if (plane_id >= dm->dc->caps.max_streams)
4093                 possible_crtcs = 0xff;
4094
4095         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4096
4097         if (ret) {
4098                 DRM_ERROR("KMS: Failed to initialize plane\n");
4099                 kfree(plane);
4100                 return ret;
4101         }
4102
4103         if (mode_info)
4104                 mode_info->planes[plane_id] = plane;
4105
4106         return ret;
4107 }
4108
4110 static void register_backlight_device(struct amdgpu_display_manager *dm,
4111                                       struct dc_link *link)
4112 {
4113 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4114         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4115
4116         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4117             link->type != dc_connection_none) {
4118                 /*
4119                  * Even if registration failed, we should continue with
4120                  * DM initialization, because not having a backlight control
4121                  * is better than a black screen.
4122                  */
4123                 if (!dm->backlight_dev[dm->num_of_edps])
4124                         amdgpu_dm_register_backlight_device(dm);
4125
4126                 if (dm->backlight_dev[dm->num_of_edps]) {
4127                         dm->backlight_link[dm->num_of_edps] = link;
4128                         dm->num_of_edps++;
4129                 }
4130         }
4131 #endif
4132 }
4133
4135 /*
4136  * In this architecture, the association
4137  * connector -> encoder -> crtc
4138  * is not really required. The crtc and connector will hold the
4139  * display_index as an abstraction to use with the DAL component.
4140  *
4141  * Returns 0 on success
4142  */
4143 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4144 {
4145         struct amdgpu_display_manager *dm = &adev->dm;
4146         int32_t i;
4147         struct amdgpu_dm_connector *aconnector = NULL;
4148         struct amdgpu_encoder *aencoder = NULL;
4149         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4150         uint32_t link_cnt;
4151         int32_t primary_planes;
4152         enum dc_connection_type new_connection_type = dc_connection_none;
4153         const struct dc_plane_cap *plane;
4154         bool psr_feature_enabled = false;
4155
4156         dm->display_indexes_num = dm->dc->caps.max_streams;
4157         /* Update the actual number of CRTCs in use */
4158         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4159
4160         link_cnt = dm->dc->caps.max_links;
4161         if (amdgpu_dm_mode_config_init(dm->adev)) {
4162                 DRM_ERROR("DM: Failed to initialize mode config\n");
4163                 return -EINVAL;
4164         }
4165
4166         /* There is one primary plane per CRTC */
4167         primary_planes = dm->dc->caps.max_streams;
4168         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4169
4170         /*
4171          * Initialize primary planes, implicit planes for legacy IOCTLs.
4172          * Order is reversed to match iteration order in atomic check.
4173          */
4174         for (i = (primary_planes - 1); i >= 0; i--) {
4175                 plane = &dm->dc->caps.planes[i];
4176
4177                 if (initialize_plane(dm, mode_info, i,
4178                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4179                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4180                         goto fail;
4181                 }
4182         }
4183
4184         /*
4185          * Initialize overlay planes, index starting after primary planes.
4186          * These planes have a higher DRM index than the primary planes since
4187          * they should be considered as having a higher z-order.
4188          * Order is reversed to match iteration order in atomic check.
4189          *
4190          * Only support DCN for now, and only expose one so we don't encourage
4191          * userspace to use up all the pipes.
4192          */
4193         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4194                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4195
4196                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4197                         continue;
4198
4199                 if (!plane->blends_with_above || !plane->blends_with_below)
4200                         continue;
4201
4202                 if (!plane->pixel_format_support.argb8888)
4203                         continue;
4204
4205                 if (initialize_plane(dm, NULL, primary_planes + i,
4206                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4207                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4208                         goto fail;
4209                 }
4210
4211                 /* Only create one overlay plane. */
4212                 break;
4213         }
4214
4215         for (i = 0; i < dm->dc->caps.max_streams; i++)
4216                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4217                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4218                         goto fail;
4219                 }
4220
4221 #if defined(CONFIG_DRM_AMD_DC_DCN)
4222         /* Use Outbox interrupt */
4223         switch (adev->ip_versions[DCE_HWIP][0]) {
4224         case IP_VERSION(3, 0, 0):
4225         case IP_VERSION(3, 1, 2):
4226         case IP_VERSION(3, 1, 3):
4227         case IP_VERSION(2, 1, 0):
4228                 if (register_outbox_irq_handlers(dm->adev)) {
4229                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4230                         goto fail;
4231                 }
4232                 break;
4233         default:
4234                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4235                               adev->ip_versions[DCE_HWIP][0]);
4236         }
4237
4238         /* Determine whether to enable PSR support by default. */
4239         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4240                 switch (adev->ip_versions[DCE_HWIP][0]) {
4241                 case IP_VERSION(3, 1, 2):
4242                 case IP_VERSION(3, 1, 3):
4243                         psr_feature_enabled = true;
4244                         break;
4245                 default:
4246                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4247                         break;
4248                 }
4249         }
4250 #endif
4251
4252         /* Loop over all connectors on the board */
4253         for (i = 0; i < link_cnt; i++) {
4254                 struct dc_link *link = NULL;
4255
4256                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4257                         DRM_ERROR(
4258                                 "KMS: Cannot support more than %d display indexes\n",
4259                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4260                         continue;
4261                 }
4262
4263                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4264                 if (!aconnector)
4265                         goto fail;
4266
4267                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4268                 if (!aencoder)
4269                         goto fail;
4270
4271                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4272                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4273                         goto fail;
4274                 }
4275
4276                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4277                         DRM_ERROR("KMS: Failed to initialize connector\n");
4278                         goto fail;
4279                 }
4280
4281                 link = dc_get_link_at_index(dm->dc, i);
4282
4283                 if (!dc_link_detect_sink(link, &new_connection_type))
4284                         DRM_ERROR("KMS: Failed to detect connector\n");
4285
4286                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4287                         emulated_link_detect(link);
4288                         amdgpu_dm_update_connector_after_detect(aconnector);
4290                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4291                         amdgpu_dm_update_connector_after_detect(aconnector);
4292                         register_backlight_device(dm, link);
4293                         if (dm->num_of_edps)
4294                                 update_connector_ext_caps(aconnector);
4295                         if (psr_feature_enabled)
4296                                 amdgpu_dm_set_psr_caps(link);
4297                 }
4300         }
4301
4302         /*
4303          * Disable vblank IRQs aggressively for power-saving.
4304          *
4305          * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4306          * is also supported.
4307          */
4308         adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4309
4310         /* Software is initialized. Now we can register interrupt handlers. */
4311         switch (adev->asic_type) {
4312 #if defined(CONFIG_DRM_AMD_DC_SI)
4313         case CHIP_TAHITI:
4314         case CHIP_PITCAIRN:
4315         case CHIP_VERDE:
4316         case CHIP_OLAND:
4317                 if (dce60_register_irq_handlers(dm->adev)) {
4318                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4319                         goto fail;
4320                 }
4321                 break;
4322 #endif
4323         case CHIP_BONAIRE:
4324         case CHIP_HAWAII:
4325         case CHIP_KAVERI:
4326         case CHIP_KABINI:
4327         case CHIP_MULLINS:
4328         case CHIP_TONGA:
4329         case CHIP_FIJI:
4330         case CHIP_CARRIZO:
4331         case CHIP_STONEY:
4332         case CHIP_POLARIS11:
4333         case CHIP_POLARIS10:
4334         case CHIP_POLARIS12:
4335         case CHIP_VEGAM:
4336         case CHIP_VEGA10:
4337         case CHIP_VEGA12:
4338         case CHIP_VEGA20:
4339                 if (dce110_register_irq_handlers(dm->adev)) {
4340                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4341                         goto fail;
4342                 }
4343                 break;
4344         default:
4345 #if defined(CONFIG_DRM_AMD_DC_DCN)
4346                 switch (adev->ip_versions[DCE_HWIP][0]) {
4347                 case IP_VERSION(1, 0, 0):
4348                 case IP_VERSION(1, 0, 1):
4349                 case IP_VERSION(2, 0, 2):
4350                 case IP_VERSION(2, 0, 3):
4351                 case IP_VERSION(2, 0, 0):
4352                 case IP_VERSION(2, 1, 0):
4353                 case IP_VERSION(3, 0, 0):
4354                 case IP_VERSION(3, 0, 2):
4355                 case IP_VERSION(3, 0, 3):
4356                 case IP_VERSION(3, 0, 1):
4357                 case IP_VERSION(3, 1, 2):
4358                 case IP_VERSION(3, 1, 3):
4359                         if (dcn10_register_irq_handlers(dm->adev)) {
4360                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4361                                 goto fail;
4362                         }
4363                         break;
4364                 default:
4365                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4366                                   adev->ip_versions[DCE_HWIP][0]);
4367                         goto fail;
4368                 }
4369 #endif
4370                 break;
4371         }
4372
4373         return 0;
4374 fail:
4375         kfree(aencoder);
4376         kfree(aconnector);
4377
4378         return -EINVAL;
4379 }
4380
4381 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4382 {
4383         drm_atomic_private_obj_fini(&dm->atomic_obj);
4385 }
4386
4387 /******************************************************************************
4388  * amdgpu_display_funcs functions
4389  *****************************************************************************/
4390
4391 /**
4392  * dm_bandwidth_update - program display watermarks
4393  *
4394  * @adev: amdgpu_device pointer
4395  *
4396  * Calculate and program the display watermarks and line buffer allocation.
4397  */
4398 static void dm_bandwidth_update(struct amdgpu_device *adev)
4399 {
4400         /* TODO: implement later */
4401 }
4402
4403 static const struct amdgpu_display_funcs dm_display_funcs = {
4404         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4405         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4406         .backlight_set_level = NULL, /* never called for DC */
4407         .backlight_get_level = NULL, /* never called for DC */
4408         .hpd_sense = NULL, /* called unconditionally */
4409         .hpd_set_polarity = NULL, /* called unconditionally */
4410         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4411         .page_flip_get_scanoutpos =
4412                 dm_crtc_get_scanoutpos, /* called unconditionally */
4413         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4414         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4415 };
4416
4417 #if defined(CONFIG_DEBUG_KERNEL_DC)
4418
4419 static ssize_t s3_debug_store(struct device *device,
4420                               struct device_attribute *attr,
4421                               const char *buf,
4422                               size_t count)
4423 {
4424         int ret;
4425         int s3_state;
4426         struct drm_device *drm_dev = dev_get_drvdata(device);
4427         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4428
4429         ret = kstrtoint(buf, 0, &s3_state);
4430
4431         if (ret == 0) {
4432                 if (s3_state) {
4433                         dm_resume(adev);
4434                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4435                 } else {
4436                         dm_suspend(adev);
                }
4437         }
4438
4439         return ret == 0 ? count : 0;
4440 }
4441
4442 DEVICE_ATTR_WO(s3_debug);
4443
4444 #endif
4445
4446 static int dm_early_init(void *handle)
4447 {
4448         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4449
4450         switch (adev->asic_type) {
4451 #if defined(CONFIG_DRM_AMD_DC_SI)
4452         case CHIP_TAHITI:
4453         case CHIP_PITCAIRN:
4454         case CHIP_VERDE:
4455                 adev->mode_info.num_crtc = 6;
4456                 adev->mode_info.num_hpd = 6;
4457                 adev->mode_info.num_dig = 6;
4458                 break;
4459         case CHIP_OLAND:
4460                 adev->mode_info.num_crtc = 2;
4461                 adev->mode_info.num_hpd = 2;
4462                 adev->mode_info.num_dig = 2;
4463                 break;
4464 #endif
4465         case CHIP_BONAIRE:
4466         case CHIP_HAWAII:
4467                 adev->mode_info.num_crtc = 6;
4468                 adev->mode_info.num_hpd = 6;
4469                 adev->mode_info.num_dig = 6;
4470                 break;
4471         case CHIP_KAVERI:
4472                 adev->mode_info.num_crtc = 4;
4473                 adev->mode_info.num_hpd = 6;
4474                 adev->mode_info.num_dig = 7;
4475                 break;
4476         case CHIP_KABINI:
4477         case CHIP_MULLINS:
4478                 adev->mode_info.num_crtc = 2;
4479                 adev->mode_info.num_hpd = 6;
4480                 adev->mode_info.num_dig = 6;
4481                 break;
4482         case CHIP_FIJI:
4483         case CHIP_TONGA:
4484                 adev->mode_info.num_crtc = 6;
4485                 adev->mode_info.num_hpd = 6;
4486                 adev->mode_info.num_dig = 7;
4487                 break;
4488         case CHIP_CARRIZO:
4489                 adev->mode_info.num_crtc = 3;
4490                 adev->mode_info.num_hpd = 6;
4491                 adev->mode_info.num_dig = 9;
4492                 break;
4493         case CHIP_STONEY:
4494                 adev->mode_info.num_crtc = 2;
4495                 adev->mode_info.num_hpd = 6;
4496                 adev->mode_info.num_dig = 9;
4497                 break;
4498         case CHIP_POLARIS11:
4499         case CHIP_POLARIS12:
4500                 adev->mode_info.num_crtc = 5;
4501                 adev->mode_info.num_hpd = 5;
4502                 adev->mode_info.num_dig = 5;
4503                 break;
4504         case CHIP_POLARIS10:
4505         case CHIP_VEGAM:
4506                 adev->mode_info.num_crtc = 6;
4507                 adev->mode_info.num_hpd = 6;
4508                 adev->mode_info.num_dig = 6;
4509                 break;
4510         case CHIP_VEGA10:
4511         case CHIP_VEGA12:
4512         case CHIP_VEGA20:
4513                 adev->mode_info.num_crtc = 6;
4514                 adev->mode_info.num_hpd = 6;
4515                 adev->mode_info.num_dig = 6;
4516                 break;
4517         default:
4518 #if defined(CONFIG_DRM_AMD_DC_DCN)
4519                 switch (adev->ip_versions[DCE_HWIP][0]) {
4520                 case IP_VERSION(2, 0, 2):
4521                 case IP_VERSION(3, 0, 0):
4522                         adev->mode_info.num_crtc = 6;
4523                         adev->mode_info.num_hpd = 6;
4524                         adev->mode_info.num_dig = 6;
4525                         break;
4526                 case IP_VERSION(2, 0, 0):
4527                 case IP_VERSION(3, 0, 2):
4528                         adev->mode_info.num_crtc = 5;
4529                         adev->mode_info.num_hpd = 5;
4530                         adev->mode_info.num_dig = 5;
4531                         break;
4532                 case IP_VERSION(2, 0, 3):
4533                 case IP_VERSION(3, 0, 3):
4534                         adev->mode_info.num_crtc = 2;
4535                         adev->mode_info.num_hpd = 2;
4536                         adev->mode_info.num_dig = 2;
4537                         break;
4538                 case IP_VERSION(1, 0, 0):
4539                 case IP_VERSION(1, 0, 1):
4540                 case IP_VERSION(3, 0, 1):
4541                 case IP_VERSION(2, 1, 0):
4542                 case IP_VERSION(3, 1, 2):
4543                 case IP_VERSION(3, 1, 3):
4544                         adev->mode_info.num_crtc = 4;
4545                         adev->mode_info.num_hpd = 4;
4546                         adev->mode_info.num_dig = 4;
4547                         break;
4548                 default:
4549                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4550                                   adev->ip_versions[DCE_HWIP][0]);
4551                         return -EINVAL;
4552                 }
4553 #endif
4554                 break;
4555         }
4556
4557         amdgpu_dm_set_irq_funcs(adev);
4558
4559         if (!adev->mode_info.funcs)
4560                 adev->mode_info.funcs = &dm_display_funcs;
4561
4562         /*
4563          * Note: Do NOT change adev->audio_endpt_rreg and
4564          * adev->audio_endpt_wreg because they are initialised in
4565          * amdgpu_device_init()
4566          */
4567 #if defined(CONFIG_DEBUG_KERNEL_DC)
4568         device_create_file(
4569                 adev_to_drm(adev)->dev,
4570                 &dev_attr_s3_debug);
4571 #endif
4572
4573         return 0;
4574 }
4575
4576 static bool modeset_required(struct drm_crtc_state *crtc_state,
4577                              struct dc_stream_state *new_stream,
4578                              struct dc_stream_state *old_stream)
4579 {
4580         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4581 }
4582
4583 static bool modereset_required(struct drm_crtc_state *crtc_state)
4584 {
4585         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4586 }
4587
4588 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4589 {
4590         drm_encoder_cleanup(encoder);
4591         kfree(encoder);
4592 }
4593
4594 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4595         .destroy = amdgpu_dm_encoder_destroy,
4596 };
4597
4599 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4600                                          struct drm_framebuffer *fb,
4601                                          int *min_downscale, int *max_upscale)
4602 {
4603         struct amdgpu_device *adev = drm_to_adev(dev);
4604         struct dc *dc = adev->dm.dc;
4605         /* Caps for all supported planes are the same on DCE and DCN 1-3 */
4606         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4607
4608         switch (fb->format->format) {
4609         case DRM_FORMAT_P010:
4610         case DRM_FORMAT_NV12:
4611         case DRM_FORMAT_NV21:
4612                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4613                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4614                 break;
4615
4616         case DRM_FORMAT_XRGB16161616F:
4617         case DRM_FORMAT_ARGB16161616F:
4618         case DRM_FORMAT_XBGR16161616F:
4619         case DRM_FORMAT_ABGR16161616F:
4620                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4621                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4622                 break;
4623
4624         default:
4625                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4626                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4627                 break;
4628         }
4629
4630         /*
4631          * A factor of 1 in the plane_cap means scaling is not allowed,
4632          * i.e. use a scaling factor of 1.0 == 1000 units.
4633          */
4634         if (*max_upscale == 1)
4635                 *max_upscale = 1000;
4636
4637         if (*min_downscale == 1)
4638                 *min_downscale = 1000;
4639 }
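
/*
 * Worked example of the 1000-unit convention above: a plane cap reporting
 * max_upscale_factor.argb8888 == 16000 permits up to 16.0x upscaling, and
 * max_downscale_factor.argb8888 == 250 permits shrinking to 0.25x of the
 * source size, since 1000 units correspond to a 1.0 scaling ratio.
 */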
4640
4642 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4643                                 const struct drm_plane_state *state,
4644                                 struct dc_scaling_info *scaling_info)
4645 {
4646         int scale_w, scale_h, min_downscale, max_upscale;
4647
4648         memset(scaling_info, 0, sizeof(*scaling_info));
4649
4650         /* Source is 16.16 fixed point; ignore the fractional part for now... */
4651         scaling_info->src_rect.x = state->src_x >> 16;
4652         scaling_info->src_rect.y = state->src_y >> 16;
4653
4654         /*
4655          * For reasons we don't (yet) fully understand, a non-zero
4656          * src_y coordinate into an NV12 buffer can cause a
4657          * system hang on DCN1x.
4658          * To avoid hangs (and maybe be overly cautious)
4659          * let's reject both non-zero src_x and src_y.
4660          *
4661          * We currently know of only one use-case to reproduce a
4662          * scenario with non-zero src_x and src_y for NV12, which
4663          * is to gesture the YouTube Android app into full screen
4664          * on ChromeOS.
4665          */
4666         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4667             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4668             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4669             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4670                 return -EINVAL;
4671
4672         scaling_info->src_rect.width = state->src_w >> 16;
4673         if (scaling_info->src_rect.width == 0)
4674                 return -EINVAL;
4675
4676         scaling_info->src_rect.height = state->src_h >> 16;
4677         if (scaling_info->src_rect.height == 0)
4678                 return -EINVAL;
4679
4680         scaling_info->dst_rect.x = state->crtc_x;
4681         scaling_info->dst_rect.y = state->crtc_y;
4682
4683         if (state->crtc_w == 0)
4684                 return -EINVAL;
4685
4686         scaling_info->dst_rect.width = state->crtc_w;
4687
4688         if (state->crtc_h == 0)
4689                 return -EINVAL;
4690
4691         scaling_info->dst_rect.height = state->crtc_h;
4692
4693         /* DRM doesn't specify clipping on destination output. */
4694         scaling_info->clip_rect = scaling_info->dst_rect;
4695
4696         /* Validate scaling per-format with DC plane caps */
4697         if (state->plane && state->plane->dev && state->fb) {
4698                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4699                                              &min_downscale, &max_upscale);
4700         } else {
4701                 min_downscale = 250;
4702                 max_upscale = 16000;
4703         }
4704
4705         scale_w = scaling_info->dst_rect.width * 1000 /
4706                   scaling_info->src_rect.width;
4707
4708         if (scale_w < min_downscale || scale_w > max_upscale)
4709                 return -EINVAL;
4710
4711         scale_h = scaling_info->dst_rect.height * 1000 /
4712                   scaling_info->src_rect.height;
4713
4714         if (scale_h < min_downscale || scale_h > max_upscale)
4715                 return -EINVAL;
4716
4717         /*
4718          * The "scaling_quality" can be ignored for now; quality = 0 lets DC
4719          * assume reasonable defaults based on the format.
4720          */
4721
4722         return 0;
4723 }
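
/*
 * For illustration, the per-axis ratio check above in fixed-point units:
 * a 1920-wide source scanned out into a 960-wide destination gives
 * scale_w = 960 * 1000 / 1920 = 500 (0.5x), which is rejected whenever the
 * format's min_downscale is stricter than 500. Likewise, src_x/src_y arrive
 * in 16.16 fixed point, so state->src_x == 0x10000 means 1 pixel.
 */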
4724
4725 static void
4726 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4727                                  uint64_t tiling_flags)
4728 {
4729         /* Fill GFX8 params */
4730         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4731                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4732
4733                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4734                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4735                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4736                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4737                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4738
4739                 /* XXX fix me for VI */
4740                 tiling_info->gfx8.num_banks = num_banks;
4741                 tiling_info->gfx8.array_mode =
4742                                 DC_ARRAY_2D_TILED_THIN1;
4743                 tiling_info->gfx8.tile_split = tile_split;
4744                 tiling_info->gfx8.bank_width = bankw;
4745                 tiling_info->gfx8.bank_height = bankh;
4746                 tiling_info->gfx8.tile_aspect = mtaspect;
4747                 tiling_info->gfx8.tile_mode =
4748                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4749         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4750                         == DC_ARRAY_1D_TILED_THIN1) {
4751                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4752         }
4753
4754         tiling_info->gfx8.pipe_config =
4755                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4756 }
4757
4758 static void
4759 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4760                                   union dc_tiling_info *tiling_info)
4761 {
4762         tiling_info->gfx9.num_pipes =
4763                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4764         tiling_info->gfx9.num_banks =
4765                 adev->gfx.config.gb_addr_config_fields.num_banks;
4766         tiling_info->gfx9.pipe_interleave =
4767                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4768         tiling_info->gfx9.num_shader_engines =
4769                 adev->gfx.config.gb_addr_config_fields.num_se;
4770         tiling_info->gfx9.max_compressed_frags =
4771                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4772         tiling_info->gfx9.num_rb_per_se =
4773                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4774         tiling_info->gfx9.shaderEnable = 1;
4775         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4776                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4777 }
4778
4779 static int
4780 validate_dcc(struct amdgpu_device *adev,
4781              const enum surface_pixel_format format,
4782              const enum dc_rotation_angle rotation,
4783              const union dc_tiling_info *tiling_info,
4784              const struct dc_plane_dcc_param *dcc,
4785              const struct dc_plane_address *address,
4786              const struct plane_size *plane_size)
4787 {
4788         struct dc *dc = adev->dm.dc;
4789         struct dc_dcc_surface_param input;
4790         struct dc_surface_dcc_cap output;
4791
4792         memset(&input, 0, sizeof(input));
4793         memset(&output, 0, sizeof(output));
4794
4795         if (!dcc->enable)
4796                 return 0;
4797
4798         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4799             !dc->cap_funcs.get_dcc_compression_cap)
4800                 return -EINVAL;
4801
4802         input.format = format;
4803         input.surface_size.width = plane_size->surface_size.width;
4804         input.surface_size.height = plane_size->surface_size.height;
4805         input.swizzle_mode = tiling_info->gfx9.swizzle;
4806
4807         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4808                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4809         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4810                 input.scan = SCAN_DIRECTION_VERTICAL;
4811
4812         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4813                 return -EINVAL;
4814
4815         if (!output.capable)
4816                 return -EINVAL;
4817
4818         if (dcc->independent_64b_blks == 0 &&
4819             output.grph.rgb.independent_64b_blks != 0)
4820                 return -EINVAL;
4821
4822         return 0;
4823 }
4824
4825 static bool
4826 modifier_has_dcc(uint64_t modifier)
4827 {
4828         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4829 }
4830
4831 static unsigned
4832 modifier_gfx9_swizzle_mode(uint64_t modifier)
4833 {
4834         if (modifier == DRM_FORMAT_MOD_LINEAR)
4835                 return 0;
4836
4837         return AMD_FMT_MOD_GET(TILE, modifier);
4838 }
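
/*
 * Sketch of the modifier layout relied on here: AMD format modifiers are
 * bit-fields decoded with AMD_FMT_MOD_GET(), and the TILE field carries the
 * GFX9+ swizzle mode directly. For example, a modifier built with
 * AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) decodes back to
 * AMD_FMT_MOD_TILE_GFX9_64K_S_X, while DRM_FORMAT_MOD_LINEAR maps to
 * swizzle 0.
 */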
4839
4840 static const struct drm_format_info *
4841 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4842 {
4843         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4844 }
4845
4846 static void
4847 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4848                                     union dc_tiling_info *tiling_info,
4849                                     uint64_t modifier)
4850 {
4851         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4852         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4853         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4854         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4855
4856         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4857
4858         if (!IS_AMD_FMT_MOD(modifier))
4859                 return;
4860
4861         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4862         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4863
4864         if (adev->family >= AMDGPU_FAMILY_NV) {
4865                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4866         } else {
4867                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4868
4869                 /* For DCC we know it isn't RB-aligned, so rb_per_se doesn't matter. */
4870         }
4871 }
4872
4873 enum dm_micro_swizzle {
4874         MICRO_SWIZZLE_Z = 0,
4875         MICRO_SWIZZLE_S = 1,
4876         MICRO_SWIZZLE_D = 2,
4877         MICRO_SWIZZLE_R = 3
4878 };
4879
4880 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4881                                           uint32_t format,
4882                                           uint64_t modifier)
4883 {
4884         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4885         const struct drm_format_info *info = drm_format_info(format);
4886         int i;
4887
4888         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4889
4890         if (!info)
4891                 return false;
4892
4893         /*
4894          * We always have to allow these modifiers:
4895          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4896          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4897          */
4898         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4899             modifier == DRM_FORMAT_MOD_INVALID) {
4900                 return true;
4901         }
4902
4903         /* Check that the modifier is on the list of the plane's supported modifiers. */
4904         for (i = 0; i < plane->modifier_count; i++) {
4905                 if (modifier == plane->modifiers[i])
4906                         break;
4907         }
4908         if (i == plane->modifier_count)
4909                 return false;
4910
4911         /*
4912          * For D swizzle the canonical modifier depends on the bpp, so check
4913          * it here.
4914          */
4915         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4916             adev->family >= AMDGPU_FAMILY_NV) {
4917                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4918                         return false;
4919         }
4920
4921         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4922             info->cpp[0] < 8)
4923                 return false;
4924
4925         if (modifier_has_dcc(modifier)) {
4926                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4927                 if (info->cpp[0] != 4)
4928                         return false;
4929                 /* We support multi-planar formats, but not when combined with
4930                  * additional DCC metadata planes. */
4931                 if (info->num_planes > 1)
4932                         return false;
4933         }
4934
4935         return true;
4936 }
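
/*
 * Illustrative outcomes of the checks above: XRGB8888 (cpp == 4, a single
 * plane) with a DCC modifier from this plane's list is accepted, whereas
 * NV12 (two planes) with any DCC modifier is rejected, as is a D-swizzle
 * modifier for 32bpp formats on GFX9/NV-class parts.
 */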
4937
4938 static void
4939 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4940 {
4941         if (!*mods)
4942                 return;
4943
4944         if (*cap - *size < 1) {
4945                 uint64_t new_cap = *cap * 2;
4946                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4947
4948                 if (!new_mods) {
4949                         kfree(*mods);
4950                         *mods = NULL;
4951                         return;
4952                 }
4953
4954                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4955                 kfree(*mods);
4956                 *mods = new_mods;
4957                 *cap = new_cap;
4958         }
4959
4960         (*mods)[*size] = mod;
4961         *size += 1;
4962 }
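
/*
 * add_modifier() grows the list geometrically: starting from the caller's
 * initial capacity (128 in get_plane_modifiers() below), it doubles to 256,
 * 512, ... as entries are appended. On allocation failure the whole list is
 * freed and *mods is set to NULL, which later additions treat as a no-op
 * and callers report as -ENOMEM.
 */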
4963
4964 static void
4965 add_gfx9_modifiers(const struct amdgpu_device *adev,
4966                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4967 {
4968         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4969         int pipe_xor_bits = min(8, pipes +
4970                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4971         int bank_xor_bits = min(8 - pipe_xor_bits,
4972                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4973         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4974                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4975
4977         if (adev->family == AMDGPU_FAMILY_RV) {
4978                 /* Raven2 and later support DCC constant encoding. */
4979                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4980
4981                 /*
4982                  * No _D DCC swizzles yet because we only allow 32bpp, which
4983                  * doesn't support _D on DCN
4984                  */
4985
4986                 if (has_constant_encode) {
4987                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4988                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4989                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4990                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4991                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4992                                     AMD_FMT_MOD_SET(DCC, 1) |
4993                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4994                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4995                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4996                 }
4997
4998                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4999                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5000                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5001                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5002                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5003                             AMD_FMT_MOD_SET(DCC, 1) |
5004                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5005                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5006                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5007
5008                 if (has_constant_encode) {
5009                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5010                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5011                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5012                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5013                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5014                                     AMD_FMT_MOD_SET(DCC, 1) |
5015                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5016                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5017                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5019                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5020                                     AMD_FMT_MOD_SET(RB, rb) |
5021                                     AMD_FMT_MOD_SET(PIPE, pipes));
5022                 }
5023
5024                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5025                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5026                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5027                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5028                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5029                             AMD_FMT_MOD_SET(DCC, 1) |
5030                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5031                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5032                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5033                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5034                             AMD_FMT_MOD_SET(RB, rb) |
5035                             AMD_FMT_MOD_SET(PIPE, pipes));
5036         }
5037
5038         /*
5039          * Only supported for 64bpp on Raven, will be filtered on format in
5040          * dm_plane_format_mod_supported.
5041          */
5042         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5043                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5044                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5045                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5046                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5047
5048         if (adev->family == AMDGPU_FAMILY_RV) {
5049                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5050                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5051                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5052                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5053                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5054         }
5055
5056         /*
5057          * Only supported for 64bpp on Raven, will be filtered on format in
5058          * dm_plane_format_mod_supported.
5059          */
5060         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5061                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5062                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5063
5064         if (adev->family == AMDGPU_FAMILY_RV) {
5065                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5066                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5067                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5068         }
5069 }
5070
5071 static void
5072 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5073                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5074 {
5075         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5076
5077         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5078                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5079                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5080                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5081                     AMD_FMT_MOD_SET(DCC, 1) |
5082                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5083                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5084                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5085
5086         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5087                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5088                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5089                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5090                     AMD_FMT_MOD_SET(DCC, 1) |
5091                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5092                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5093                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5094                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5095
5096         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5097                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5098                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5099                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5100
5101         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5102                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5103                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5104                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5105
5107         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5108         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5109                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5110                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5111
5112         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5113                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5114                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5115 }
5116
5117 static void
5118 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5119                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5120 {
5121         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5122         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5123
5124         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5126                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5127                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5128                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5129                     AMD_FMT_MOD_SET(DCC, 1) |
5130                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5131                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5132                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5133                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5134
5135         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5136                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5137                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5138                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5139                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5140                     AMD_FMT_MOD_SET(DCC, 1) |
5141                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5142                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5143                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5144
5145         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5146                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5147                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5148                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5149                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5150                     AMD_FMT_MOD_SET(DCC, 1) |
5151                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5152                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5153                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5154                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5155                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5156
5157         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5158                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5159                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5160                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5161                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5162                     AMD_FMT_MOD_SET(DCC, 1) |
5163                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5164                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5165                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5166                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5167
5168         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5169                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5170                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5171                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5172                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5173
5174         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5175                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5176                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5177                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5178                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5179
5180         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5181         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5182                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5183                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5184
5185         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5186                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5187                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5188 }
5189
5190 static int
5191 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5192 {
5193         uint64_t size = 0, capacity = 128;

5194         *mods = NULL;
5195
5196         /* We have not hooked up any pre-GFX9 modifiers. */
5197         if (adev->family < AMDGPU_FAMILY_AI)
5198                 return 0;
5199
5200         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5201
5202         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5203                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5204                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5205                 return *mods ? 0 : -ENOMEM;
5206         }
5207
5208         switch (adev->family) {
5209         case AMDGPU_FAMILY_AI:
5210         case AMDGPU_FAMILY_RV:
5211                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5212                 break;
5213         case AMDGPU_FAMILY_NV:
5214         case AMDGPU_FAMILY_VGH:
5215         case AMDGPU_FAMILY_YC:
5216                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5217                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5218                 else
5219                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5220                 break;
5221         }
5222
5223         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5224
5225         /* INVALID marks the end of the list. */
5226         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5227
5228         if (!*mods)
5229                 return -ENOMEM;
5230
5231         return 0;
5232 }
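
/*
 * Usage sketch: the INVALID-terminated array produced here matches what
 * DRM plane initialization, e.g. drm_universal_plane_init(), expects for
 * its format_modifiers argument; in this driver it is presumably consumed
 * by amdgpu_dm_plane_init() when the planes are created.
 */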
5233
5234 static int
5235 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5236                                           const struct amdgpu_framebuffer *afb,
5237                                           const enum surface_pixel_format format,
5238                                           const enum dc_rotation_angle rotation,
5239                                           const struct plane_size *plane_size,
5240                                           union dc_tiling_info *tiling_info,
5241                                           struct dc_plane_dcc_param *dcc,
5242                                           struct dc_plane_address *address,
5243                                           const bool force_disable_dcc)
5244 {
5245         const uint64_t modifier = afb->base.modifier;
5246         int ret = 0;
5247
5248         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5249         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5250
5251         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5252                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5253                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5254                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5255
5256                 dcc->enable = 1;
5257                 dcc->meta_pitch = afb->base.pitches[1];
5258                 dcc->independent_64b_blks = independent_64b_blks;
5259                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5260                         if (independent_64b_blks && independent_128b_blks)
5261                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5262                         else if (independent_128b_blks)
5263                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5264                         else if (independent_64b_blks && !independent_128b_blks)
5265                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5266                         else
5267                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5268                 } else {
5269                         if (independent_64b_blks)
5270                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5271                         else
5272                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5273                 }
5274
5275                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5276                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5277         }
5278
5279         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5280         if (ret)
5281                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5282
5283         return ret;
5284 }
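
/*
 * Decoding example for the DCC independent-block selection above: a
 * GFX10_RBPLUS modifier with both DCC_INDEPENDENT_64B and
 * DCC_INDEPENDENT_128B set resolves to hubp_ind_block_64b_no_128bcl, one
 * with only DCC_INDEPENDENT_128B resolves to hubp_ind_block_128b, and
 * pre-RBPLUS tiling falls back to the 64B/unconstrained pair.
 */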
5285
5286 static int
5287 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5288                              const struct amdgpu_framebuffer *afb,
5289                              const enum surface_pixel_format format,
5290                              const enum dc_rotation_angle rotation,
5291                              const uint64_t tiling_flags,
5292                              union dc_tiling_info *tiling_info,
5293                              struct plane_size *plane_size,
5294                              struct dc_plane_dcc_param *dcc,
5295                              struct dc_plane_address *address,
5296                              bool tmz_surface,
5297                              bool force_disable_dcc)
5298 {
5299         const struct drm_framebuffer *fb = &afb->base;
5300         int ret;
5301
5302         memset(tiling_info, 0, sizeof(*tiling_info));
5303         memset(plane_size, 0, sizeof(*plane_size));
5304         memset(dcc, 0, sizeof(*dcc));
5305         memset(address, 0, sizeof(*address));
5306
5307         address->tmz_surface = tmz_surface;
5308
5309         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5310                 uint64_t addr = afb->address + fb->offsets[0];
5311
5312                 plane_size->surface_size.x = 0;
5313                 plane_size->surface_size.y = 0;
5314                 plane_size->surface_size.width = fb->width;
5315                 plane_size->surface_size.height = fb->height;
5316                 plane_size->surface_pitch =
5317                         fb->pitches[0] / fb->format->cpp[0];
5318
5319                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5320                 address->grph.addr.low_part = lower_32_bits(addr);
5321                 address->grph.addr.high_part = upper_32_bits(addr);
5322         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5323                 uint64_t luma_addr = afb->address + fb->offsets[0];
5324                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5325
5326                 plane_size->surface_size.x = 0;
5327                 plane_size->surface_size.y = 0;
5328                 plane_size->surface_size.width = fb->width;
5329                 plane_size->surface_size.height = fb->height;
5330                 plane_size->surface_pitch =
5331                         fb->pitches[0] / fb->format->cpp[0];
5332
5333                 plane_size->chroma_size.x = 0;
5334                 plane_size->chroma_size.y = 0;
5335                 /* TODO: set these based on surface format */
5336                 plane_size->chroma_size.width = fb->width / 2;
5337                 plane_size->chroma_size.height = fb->height / 2;
5338
5339                 plane_size->chroma_pitch =
5340                         fb->pitches[1] / fb->format->cpp[1];
5341
5342                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5343                 address->video_progressive.luma_addr.low_part =
5344                         lower_32_bits(luma_addr);
5345                 address->video_progressive.luma_addr.high_part =
5346                         upper_32_bits(luma_addr);
5347                 address->video_progressive.chroma_addr.low_part =
5348                         lower_32_bits(chroma_addr);
5349                 address->video_progressive.chroma_addr.high_part =
5350                         upper_32_bits(chroma_addr);
5351         }
5352
5353         if (adev->family >= AMDGPU_FAMILY_AI) {
5354                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5355                                                                 rotation, plane_size,
5356                                                                 tiling_info, dcc,
5357                                                                 address,
5358                                                                 force_disable_dcc);
5359                 if (ret)
5360                         return ret;
5361         } else {
5362                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5363         }
5364
5365         return 0;
5366 }
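/*
 * Worked example (editorial sketch, values assumed): for a 1920x1080 NV12
 * framebuffer with pitches[0] = 1920, pitches[1] = 1920 and
 * offsets[1] = 1920 * 1080, cpp[0] is 1 and cpp[1] is 2, so the code above
 * yields surface_pitch = 1920, chroma_pitch = 960, a 960x540 chroma size
 * and chroma_addr = afb->address + 2073600.
 */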
5367
5368 static void
5369 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5370                                bool *per_pixel_alpha, bool *global_alpha,
5371                                int *global_alpha_value)
5372 {
5373         *per_pixel_alpha = false;
5374         *global_alpha = false;
5375         *global_alpha_value = 0xff;
5376
5377         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5378                 return;
5379
5380         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5381                 static const uint32_t alpha_formats[] = {
5382                         DRM_FORMAT_ARGB8888,
5383                         DRM_FORMAT_RGBA8888,
5384                         DRM_FORMAT_ABGR8888,
5385                 };
5386                 uint32_t format = plane_state->fb->format->format;
5387                 unsigned int i;
5388
5389                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5390                         if (format == alpha_formats[i]) {
5391                                 *per_pixel_alpha = true;
5392                                 break;
5393                         }
5394                 }
5395         }
5396
5397         if (plane_state->alpha < 0xffff) {
5398                 *global_alpha = true;
5399                 *global_alpha_value = plane_state->alpha >> 8;
5400         }
5401 }
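/*
 * Usage sketch (hypothetical values): for an overlay plane carrying
 * DRM_FORMAT_ARGB8888 with pixel_blend_mode == DRM_MODE_BLEND_PREMULTI and
 * a plane alpha of 0x8000, the helper above reports per_pixel_alpha = true,
 * global_alpha = true and global_alpha_value = 0x80, since the 16-bit DRM
 * alpha is narrowed to 8 bits by the >> 8 shift.
 */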
5402
5403 static int
5404 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5405                             const enum surface_pixel_format format,
5406                             enum dc_color_space *color_space)
5407 {
5408         bool full_range;
5409
5410         *color_space = COLOR_SPACE_SRGB;
5411
5412         /* DRM color properties only affect non-RGB formats. */
5413         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5414                 return 0;
5415
5416         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5417
5418         switch (plane_state->color_encoding) {
5419         case DRM_COLOR_YCBCR_BT601:
5420                 if (full_range)
5421                         *color_space = COLOR_SPACE_YCBCR601;
5422                 else
5423                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5424                 break;
5425
5426         case DRM_COLOR_YCBCR_BT709:
5427                 if (full_range)
5428                         *color_space = COLOR_SPACE_YCBCR709;
5429                 else
5430                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5431                 break;
5432
5433         case DRM_COLOR_YCBCR_BT2020:
5434                 if (full_range)
5435                         *color_space = COLOR_SPACE_2020_YCBCR;
5436                 else
5437                         return -EINVAL;
5438                 break;
5439
5440         default:
5441                 return -EINVAL;
5442         }
5443
5444         return 0;
5445 }
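/*
 * Example (editorial sketch): an NV12 plane with color_encoding set to
 * DRM_COLOR_YCBCR_BT709 and a limited color_range maps to
 * COLOR_SPACE_YCBCR709_LIMITED, while BT2020 with a limited range has no
 * DC equivalent here and is rejected with -EINVAL.
 */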
5446
5447 static int
5448 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5449                             const struct drm_plane_state *plane_state,
5450                             const uint64_t tiling_flags,
5451                             struct dc_plane_info *plane_info,
5452                             struct dc_plane_address *address,
5453                             bool tmz_surface,
5454                             bool force_disable_dcc)
5455 {
5456         const struct drm_framebuffer *fb = plane_state->fb;
5457         const struct amdgpu_framebuffer *afb =
5458                 to_amdgpu_framebuffer(plane_state->fb);
5459         int ret;
5460
5461         memset(plane_info, 0, sizeof(*plane_info));
5462
5463         switch (fb->format->format) {
5464         case DRM_FORMAT_C8:
5465                 plane_info->format =
5466                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5467                 break;
5468         case DRM_FORMAT_RGB565:
5469                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5470                 break;
5471         case DRM_FORMAT_XRGB8888:
5472         case DRM_FORMAT_ARGB8888:
5473                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5474                 break;
5475         case DRM_FORMAT_XRGB2101010:
5476         case DRM_FORMAT_ARGB2101010:
5477                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5478                 break;
5479         case DRM_FORMAT_XBGR2101010:
5480         case DRM_FORMAT_ABGR2101010:
5481                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5482                 break;
5483         case DRM_FORMAT_XBGR8888:
5484         case DRM_FORMAT_ABGR8888:
5485                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5486                 break;
5487         case DRM_FORMAT_NV21:
5488                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5489                 break;
5490         case DRM_FORMAT_NV12:
5491                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5492                 break;
5493         case DRM_FORMAT_P010:
5494                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5495                 break;
5496         case DRM_FORMAT_XRGB16161616F:
5497         case DRM_FORMAT_ARGB16161616F:
5498                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5499                 break;
5500         case DRM_FORMAT_XBGR16161616F:
5501         case DRM_FORMAT_ABGR16161616F:
5502                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5503                 break;
5504         case DRM_FORMAT_XRGB16161616:
5505         case DRM_FORMAT_ARGB16161616:
5506                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5507                 break;
5508         case DRM_FORMAT_XBGR16161616:
5509         case DRM_FORMAT_ABGR16161616:
5510                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5511                 break;
5512         default:
5513                 DRM_ERROR(
5514                         "Unsupported screen format %p4cc\n",
5515                         &fb->format->format);
5516                 return -EINVAL;
5517         }
5518
5519         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5520         case DRM_MODE_ROTATE_0:
5521                 plane_info->rotation = ROTATION_ANGLE_0;
5522                 break;
5523         case DRM_MODE_ROTATE_90:
5524                 plane_info->rotation = ROTATION_ANGLE_90;
5525                 break;
5526         case DRM_MODE_ROTATE_180:
5527                 plane_info->rotation = ROTATION_ANGLE_180;
5528                 break;
5529         case DRM_MODE_ROTATE_270:
5530                 plane_info->rotation = ROTATION_ANGLE_270;
5531                 break;
5532         default:
5533                 plane_info->rotation = ROTATION_ANGLE_0;
5534                 break;
5535         }
5536
5537         plane_info->visible = true;
5538         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5539
5540         plane_info->layer_index = 0;
5541
5542         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5543                                           &plane_info->color_space);
5544         if (ret)
5545                 return ret;
5546
5547         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5548                                            plane_info->rotation, tiling_flags,
5549                                            &plane_info->tiling_info,
5550                                            &plane_info->plane_size,
5551                                            &plane_info->dcc, address, tmz_surface,
5552                                            force_disable_dcc);
5553         if (ret)
5554                 return ret;
5555
5556         fill_blending_from_plane_state(
5557                 plane_state, &plane_info->per_pixel_alpha,
5558                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5559
5560         return 0;
5561 }
5562
5563 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5564                                     struct dc_plane_state *dc_plane_state,
5565                                     struct drm_plane_state *plane_state,
5566                                     struct drm_crtc_state *crtc_state)
5567 {
5568         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5569         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5570         struct dc_scaling_info scaling_info;
5571         struct dc_plane_info plane_info;
5572         int ret;
5573         bool force_disable_dcc = false;
5574
5575         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5576         if (ret)
5577                 return ret;
5578
5579         dc_plane_state->src_rect = scaling_info.src_rect;
5580         dc_plane_state->dst_rect = scaling_info.dst_rect;
5581         dc_plane_state->clip_rect = scaling_info.clip_rect;
5582         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5583
5584         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5585         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5586                                           afb->tiling_flags,
5587                                           &plane_info,
5588                                           &dc_plane_state->address,
5589                                           afb->tmz_surface,
5590                                           force_disable_dcc);
5591         if (ret)
5592                 return ret;
5593
5594         dc_plane_state->format = plane_info.format;
5595         dc_plane_state->color_space = plane_info.color_space;
5597         dc_plane_state->plane_size = plane_info.plane_size;
5598         dc_plane_state->rotation = plane_info.rotation;
5599         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5600         dc_plane_state->stereo_format = plane_info.stereo_format;
5601         dc_plane_state->tiling_info = plane_info.tiling_info;
5602         dc_plane_state->visible = plane_info.visible;
5603         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5604         dc_plane_state->global_alpha = plane_info.global_alpha;
5605         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5606         dc_plane_state->dcc = plane_info.dcc;
5607         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5608         dc_plane_state->flip_int_enabled = true;
5609
5610         /*
5611          * Always set input transfer function, since plane state is refreshed
5612          * every time.
5613          */
5614         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5615         if (ret)
5616                 return ret;
5617
5618         return 0;
5619 }
5620
5621 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5622                                            const struct dm_connector_state *dm_state,
5623                                            struct dc_stream_state *stream)
5624 {
5625         enum amdgpu_rmx_type rmx_type;
5626
5627         struct rect src = { 0 }; /* viewport in composition space */
5628         struct rect dst = { 0 }; /* stream addressable area */
5629
5630         /* no mode. nothing to be done */
5631         if (!mode)
5632                 return;
5633
5634         /* Full screen scaling by default */
5635         src.width = mode->hdisplay;
5636         src.height = mode->vdisplay;
5637         dst.width = stream->timing.h_addressable;
5638         dst.height = stream->timing.v_addressable;
5639
5640         if (dm_state) {
5641                 rmx_type = dm_state->scaling;
5642                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5643                         if (src.width * dst.height <
5644                                         src.height * dst.width) {
5645                                 /* height needs less upscaling/more downscaling */
5646                                 dst.width = src.width *
5647                                                 dst.height / src.height;
5648                         } else {
5649                                 /* width needs less upscaling/more downscaling */
5650                                 dst.height = src.height *
5651                                                 dst.width / src.width;
5652                         }
5653                 } else if (rmx_type == RMX_CENTER) {
5654                         dst = src;
5655                 }
5656
5657                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5658                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5659
5660                 if (dm_state->underscan_enable) {
5661                         dst.x += dm_state->underscan_hborder / 2;
5662                         dst.y += dm_state->underscan_vborder / 2;
5663                         dst.width -= dm_state->underscan_hborder;
5664                         dst.height -= dm_state->underscan_vborder;
5665                 }
5666         }
5667
5668         stream->src = src;
5669         stream->dst = dst;
5670
5671         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5672                       dst.x, dst.y, dst.width, dst.height);
5673
5674 }
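/*
 * Worked example (editorial sketch): scaling a 1280x1024 (5:4) source onto
 * a 1920x1080 stream with RMX_ASPECT preserves the source aspect ratio:
 * since 1280 * 1080 < 1024 * 1920, dst.width is reduced to
 * 1280 * 1080 / 1024 = 1350 and dst.x = (1920 - 1350) / 2 = 285, centering
 * the image with pillarboxing.
 */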
5675
5676 static enum dc_color_depth
5677 convert_color_depth_from_display_info(const struct drm_connector *connector,
5678                                       bool is_y420, int requested_bpc)
5679 {
5680         uint8_t bpc;
5681
5682         if (is_y420) {
5683                 bpc = 8;
5684
5685                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5686                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5687                         bpc = 16;
5688                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5689                         bpc = 12;
5690                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5691                         bpc = 10;
5692         } else {
5693                 bpc = (uint8_t)connector->display_info.bpc;
5694                 /* Assume 8 bpc by default if no bpc is specified. */
5695                 bpc = bpc ? bpc : 8;
5696         }
5697
5698         if (requested_bpc > 0) {
5699                 /*
5700                  * Cap display bpc based on the user requested value.
5701                  *
5702                  * The value for state->max_bpc may not be correctly updated
5703                  * depending on when the connector gets added to the state
5704                  * or if this was called outside of atomic check, so it
5705                  * can't be used directly.
5706                  */
5707                 bpc = min_t(u8, bpc, requested_bpc);
5708
5709                 /* Round down to the nearest even number. */
5710                 bpc = bpc - (bpc & 1);
5711         }
5712
5713         switch (bpc) {
5714         case 0:
5715                 /*
5716                  * Temporary workaround: DRM doesn't parse color depth
5717                  * for EDID revisions before 1.4.
5718                  * TODO: fix EDID parsing
5719                  */
5720                 return COLOR_DEPTH_888;
5721         case 6:
5722                 return COLOR_DEPTH_666;
5723         case 8:
5724                 return COLOR_DEPTH_888;
5725         case 10:
5726                 return COLOR_DEPTH_101010;
5727         case 12:
5728                 return COLOR_DEPTH_121212;
5729         case 14:
5730                 return COLOR_DEPTH_141414;
5731         case 16:
5732                 return COLOR_DEPTH_161616;
5733         default:
5734                 return COLOR_DEPTH_UNDEFINED;
5735         }
5736 }
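/*
 * Worked example (editorial sketch): with display_info.bpc = 12 and a
 * requested_bpc of 11, bpc becomes min(12, 11) = 11, is rounded down to
 * the even value 10, and the function returns COLOR_DEPTH_101010.
 */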
5737
5738 static enum dc_aspect_ratio
5739 get_aspect_ratio(const struct drm_display_mode *mode_in)
5740 {
5741         /* 1-1 mapping, since both enums follow the HDMI spec. */
5742         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5743 }
5744
5745 static enum dc_color_space
5746 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5747 {
5748         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5749
5750         switch (dc_crtc_timing->pixel_encoding) {
5751         case PIXEL_ENCODING_YCBCR422:
5752         case PIXEL_ENCODING_YCBCR444:
5753         case PIXEL_ENCODING_YCBCR420:
5754         {
5755                 /*
5756                  * 27030 kHz is the separation point between HDTV and
5757                  * SDTV according to the HDMI spec; above it we use
5758                  * YCbCr709, below it YCbCr601.
5759                  */
5760                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5761                         if (dc_crtc_timing->flags.Y_ONLY)
5762                                 color_space =
5763                                         COLOR_SPACE_YCBCR709_LIMITED;
5764                         else
5765                                 color_space = COLOR_SPACE_YCBCR709;
5766                 } else {
5767                         if (dc_crtc_timing->flags.Y_ONLY)
5768                                 color_space =
5769                                         COLOR_SPACE_YCBCR601_LIMITED;
5770                         else
5771                                 color_space = COLOR_SPACE_YCBCR601;
5772                 }
5773
5774         }
5775         break;
5776         case PIXEL_ENCODING_RGB:
5777                 color_space = COLOR_SPACE_SRGB;
5778                 break;
5779
5780         default:
5781                 WARN_ON(1);
5782                 break;
5783         }
5784
5785         return color_space;
5786 }
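/*
 * Worked example (editorial sketch): a CEA 1080p60 YCbCr stream has
 * pix_clk_100hz = 1485000 (148.5 MHz), which is above the 270300
 * (27.03 MHz) threshold, so it gets COLOR_SPACE_YCBCR709; a 480p stream
 * at 270000 (27 MHz) falls below it and gets COLOR_SPACE_YCBCR601.
 */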
5787
5788 static bool adjust_colour_depth_from_display_info(
5789         struct dc_crtc_timing *timing_out,
5790         const struct drm_display_info *info)
5791 {
5792         enum dc_color_depth depth = timing_out->display_color_depth;
5793         int normalized_clk;
5794         do {
5795                 normalized_clk = timing_out->pix_clk_100hz / 10;
5796                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5797                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5798                         normalized_clk /= 2;
5799                 /* Adjusting pix clock following on HDMI spec based on colour depth */
5800                 switch (depth) {
5801                 case COLOR_DEPTH_888:
5802                         break;
5803                 case COLOR_DEPTH_101010:
5804                         normalized_clk = (normalized_clk * 30) / 24;
5805                         break;
5806                 case COLOR_DEPTH_121212:
5807                         normalized_clk = (normalized_clk * 36) / 24;
5808                         break;
5809                 case COLOR_DEPTH_161616:
5810                         normalized_clk = (normalized_clk * 48) / 24;
5811                         break;
5812                 default:
5813                         /* The above depths are the only ones valid for HDMI. */
5814                         return false;
5815                 }
5816                 if (normalized_clk <= info->max_tmds_clock) {
5817                         timing_out->display_color_depth = depth;
5818                         return true;
5819                 }
5820         } while (--depth > COLOR_DEPTH_666);
5821         return false;
5822 }
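/*
 * Worked example (editorial sketch, values assumed): a 300 MHz mode at
 * 12 bpc needs 300000 * 36 / 24 = 450000 kHz of TMDS bandwidth. Against a
 * max_tmds_clock of 340000 kHz the loop above drops to 10 bpc
 * (375000 kHz, still too high) and then to 8 bpc (300000 kHz), which fits
 * and is kept.
 */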
5823
5824 static void fill_stream_properties_from_drm_display_mode(
5825         struct dc_stream_state *stream,
5826         const struct drm_display_mode *mode_in,
5827         const struct drm_connector *connector,
5828         const struct drm_connector_state *connector_state,
5829         const struct dc_stream_state *old_stream,
5830         int requested_bpc)
5831 {
5832         struct dc_crtc_timing *timing_out = &stream->timing;
5833         const struct drm_display_info *info = &connector->display_info;
5834         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5835         struct hdmi_vendor_infoframe hv_frame;
5836         struct hdmi_avi_infoframe avi_frame;
5837
5838         memset(&hv_frame, 0, sizeof(hv_frame));
5839         memset(&avi_frame, 0, sizeof(avi_frame));
5840
5841         timing_out->h_border_left = 0;
5842         timing_out->h_border_right = 0;
5843         timing_out->v_border_top = 0;
5844         timing_out->v_border_bottom = 0;
5845         /* TODO: un-hardcode */
5846         if (drm_mode_is_420_only(info, mode_in)
5847                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5848                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5849         else if (drm_mode_is_420_also(info, mode_in)
5850                         && aconnector->force_yuv420_output)
5851                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5852         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5853                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5854                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5855         else
5856                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5857
5858         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5859         timing_out->display_color_depth = convert_color_depth_from_display_info(
5860                 connector,
5861                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5862                 requested_bpc);
5863         timing_out->scan_type = SCANNING_TYPE_NODATA;
5864         timing_out->hdmi_vic = 0;
5865
5866         if (old_stream) {
5867                 timing_out->vic = old_stream->timing.vic;
5868                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5869                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5870         } else {
5871                 timing_out->vic = drm_match_cea_mode(mode_in);
5872                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5873                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5874                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5875                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5876         }
5877
5878         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5879                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5880                 timing_out->vic = avi_frame.video_code;
5881                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5882                 timing_out->hdmi_vic = hv_frame.vic;
5883         }
5884
5885         if (is_freesync_video_mode(mode_in, aconnector)) {
5886                 timing_out->h_addressable = mode_in->hdisplay;
5887                 timing_out->h_total = mode_in->htotal;
5888                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5889                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5890                 timing_out->v_total = mode_in->vtotal;
5891                 timing_out->v_addressable = mode_in->vdisplay;
5892                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5893                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5894                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5895         } else {
5896                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5897                 timing_out->h_total = mode_in->crtc_htotal;
5898                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5899                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5900                 timing_out->v_total = mode_in->crtc_vtotal;
5901                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5902                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5903                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5904                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5905         }
5906
5907         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5908
5909         stream->output_color_space = get_output_color_space(timing_out);
5910
5911         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5912         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5913         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5914                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5915                     drm_mode_is_420_also(info, mode_in) &&
5916                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5917                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5918                         adjust_colour_depth_from_display_info(timing_out, info);
5919                 }
5920         }
5921 }
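/*
 * Worked example (editorial sketch): when no RGB colour depth fits the
 * sink's max_tmds_clock but the mode is 4:2:0 capable, the retry above
 * switches to YCBCR420; the halving of normalized_clk for 4:2:0 in
 * adjust_colour_depth_from_display_info() is what lets e.g. a 594 MHz
 * 4K60 mode (297000 kHz effective) fit on a 340000 kHz HDMI 2.0 link.
 */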
5922
5923 static void fill_audio_info(struct audio_info *audio_info,
5924                             const struct drm_connector *drm_connector,
5925                             const struct dc_sink *dc_sink)
5926 {
5927         int i = 0;
5928         int cea_revision = 0;
5929         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5930
5931         audio_info->manufacture_id = edid_caps->manufacturer_id;
5932         audio_info->product_id = edid_caps->product_id;
5933
5934         cea_revision = drm_connector->display_info.cea_rev;
5935
5936         strscpy(audio_info->display_name,
5937                 edid_caps->display_name,
5938                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5939
5940         if (cea_revision >= 3) {
5941                 audio_info->mode_count = edid_caps->audio_mode_count;
5942
5943                 for (i = 0; i < audio_info->mode_count; ++i) {
5944                         audio_info->modes[i].format_code =
5945                                         (enum audio_format_code)
5946                                         (edid_caps->audio_modes[i].format_code);
5947                         audio_info->modes[i].channel_count =
5948                                         edid_caps->audio_modes[i].channel_count;
5949                         audio_info->modes[i].sample_rates.all =
5950                                         edid_caps->audio_modes[i].sample_rate;
5951                         audio_info->modes[i].sample_size =
5952                                         edid_caps->audio_modes[i].sample_size;
5953                 }
5954         }
5955
5956         audio_info->flags.all = edid_caps->speaker_flags;
5957
5958         /* TODO: We only check progressive mode; check interlaced mode too */
5959         if (drm_connector->latency_present[0]) {
5960                 audio_info->video_latency = drm_connector->video_latency[0];
5961                 audio_info->audio_latency = drm_connector->audio_latency[0];
5962         }
5963
5964         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5965
5966 }
5967
5968 static void
5969 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5970                                       struct drm_display_mode *dst_mode)
5971 {
5972         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5973         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5974         dst_mode->crtc_clock = src_mode->crtc_clock;
5975         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5976         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5977         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5978         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5979         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5980         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5981         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5982         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5983         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5984         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5985         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5986 }
5987
5988 static void
5989 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5990                                         const struct drm_display_mode *native_mode,
5991                                         bool scale_enabled)
5992 {
5993         if (scale_enabled) {
5994                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5995         } else if (native_mode->clock == drm_mode->clock &&
5996                         native_mode->htotal == drm_mode->htotal &&
5997                         native_mode->vtotal == drm_mode->vtotal) {
5998                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5999         } else {
6000                 /* neither scaling nor an amdgpu-inserted mode, nothing to patch */
6001         }
6002 }
6003
6004 static struct dc_sink *
6005 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6006 {
6007         struct dc_sink_init_data sink_init_data = { 0 };
6008         struct dc_sink *sink = NULL;

6009         sink_init_data.link = aconnector->dc_link;
6010         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6011
6012         sink = dc_sink_create(&sink_init_data);
6013         if (!sink) {
6014                 DRM_ERROR("Failed to create sink!\n");
6015                 return NULL;
6016         }
6017         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6018
6019         return sink;
6020 }
6021
6022 static void set_multisync_trigger_params(
6023                 struct dc_stream_state *stream)
6024 {
6025         struct dc_stream_state *master = NULL;
6026
6027         if (stream->triggered_crtc_reset.enabled) {
6028                 master = stream->triggered_crtc_reset.event_source;
6029                 stream->triggered_crtc_reset.event =
6030                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6031                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6032                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6033         }
6034 }
6035
6036 static void set_master_stream(struct dc_stream_state *stream_set[],
6037                               int stream_count)
6038 {
6039         int j, highest_rfr = 0, master_stream = 0;
6040
6041         for (j = 0; j < stream_count; j++) {
6042                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6043                         int refresh_rate = 0;
6044
6045                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6046                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6047                         if (refresh_rate > highest_rfr) {
6048                                 highest_rfr = refresh_rate;
6049                                 master_stream = j;
6050                         }
6051                 }
6052         }
6053         for (j = 0; j < stream_count; j++) {
6054                 if (stream_set[j])
6055                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6056         }
6057 }
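/*
 * Worked example (editorial sketch): a 1080p60 stream has
 * pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125, so
 * refresh_rate = (1485000 * 100) / (2200 * 1125) = 60. The stream with the
 * highest such rate becomes the triggered_crtc_reset event source for all
 * streams in the set.
 */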
6058
6059 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6060 {
6061         int i = 0;
6062         struct dc_stream_state *stream;
6063
6064         if (context->stream_count < 2)
6065                 return;
6066         for (i = 0; i < context->stream_count; i++) {
6067                 if (!context->streams[i])
6068                         continue;
6069                 /*
6070                  * TODO: add a function to read AMD VSDB bits and set
6071                  * crtc_sync_master.multi_sync_enabled flag
6072                  * For now it's set to false
6073                  */
6074         }
6075
6076         set_master_stream(context->streams, context->stream_count);
6077
6078         for (i = 0; i < context->stream_count; i++) {
6079                 stream = context->streams[i];
6080
6081                 if (!stream)
6082                         continue;
6083
6084                 set_multisync_trigger_params(stream);
6085         }
6086 }
6087
6088 #if defined(CONFIG_DRM_AMD_DC_DCN)
6089 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6090                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6091                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6092 {
6093         stream->timing.flags.DSC = 0;
6094         dsc_caps->is_dsc_supported = false;
6095
6096         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6097                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6098                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6099                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6100                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6101                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6102                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6103                                 dsc_caps);
6104         }
6105 }
6106
6107 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6108                                     struct dc_sink *sink, struct dc_stream_state *stream,
6109                                     struct dsc_dec_dpcd_caps *dsc_caps,
6110                                     uint32_t max_dsc_target_bpp_limit_override)
6111 {
6112         const struct dc_link_settings *verified_link_cap = NULL;
6113         uint32_t link_bw_in_kbps;
6114         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6115         struct dc *dc = sink->ctx->dc;
6116         struct dc_dsc_bw_range bw_range = {0};
6117         struct dc_dsc_config dsc_cfg = {0};
6118
6119         verified_link_cap = dc_link_get_link_cap(stream->link);
6120         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6121         edp_min_bpp_x16 = 8 * 16;
6122         edp_max_bpp_x16 = 8 * 16;
6123
6124         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6125                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6126
6127         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6128                 edp_min_bpp_x16 = edp_max_bpp_x16;
6129
6130         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6131                                 dc->debug.dsc_min_slice_height_override,
6132                                 edp_min_bpp_x16, edp_max_bpp_x16,
6133                                 dsc_caps,
6134                                 &stream->timing,
6135                                 &bw_range)) {
6136
6137                 if (bw_range.max_kbps < link_bw_in_kbps) {
6138                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6139                                         dsc_caps,
6140                                         dc->debug.dsc_min_slice_height_override,
6141                                         max_dsc_target_bpp_limit_override,
6142                                         0,
6143                                         &stream->timing,
6144                                         &dsc_cfg)) {
6145                                 stream->timing.dsc_cfg = dsc_cfg;
6146                                 stream->timing.flags.DSC = 1;
6147                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6148                         }
6149                         return;
6150                 }
6151         }
6152
6153         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6154                                 dsc_caps,
6155                                 dc->debug.dsc_min_slice_height_override,
6156                                 max_dsc_target_bpp_limit_override,
6157                                 link_bw_in_kbps,
6158                                 &stream->timing,
6159                                 &dsc_cfg)) {
6160                 stream->timing.dsc_cfg = dsc_cfg;
6161                 stream->timing.flags.DSC = 1;
6162         }
6163 }
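/*
 * Note (editorial sketch): DSC target bpp values here are in units of
 * 1/16 bit, so edp_min_bpp_x16 = edp_max_bpp_x16 = 8 * 16 pins the target
 * at exactly 8 bpp unless the panel's reported edp_max_bits_per_pixel
 * (treated in the same units by the comparison above) caps it lower.
 */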
6164
6165 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6166                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6167                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6168 {
6169         struct drm_connector *drm_connector = &aconnector->base;
6170         uint32_t link_bandwidth_kbps;
6171         uint32_t max_dsc_target_bpp_limit_override = 0;
6172         struct dc *dc = sink->ctx->dc;
6173         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6174         uint32_t dsc_max_supported_bw_in_kbps;
6175
6176         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6177                                                         dc_link_get_link_cap(aconnector->dc_link));
6178
6179         if (stream->link && stream->link->local_sink)
6180                 max_dsc_target_bpp_limit_override =
6181                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6182
6183         /* Set DSC policy according to dsc_clock_en */
6184         dc_dsc_policy_set_enable_dsc_when_not_needed(
6185                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6186
6187         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6188             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6189
6190                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6191
6192         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6193                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6194                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6195                                                 dsc_caps,
6196                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6197                                                 max_dsc_target_bpp_limit_override,
6198                                                 link_bandwidth_kbps,
6199                                                 &stream->timing,
6200                                                 &stream->timing.dsc_cfg)) {
6201                                 stream->timing.flags.DSC = 1;
6202                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6203                                                                  __func__, drm_connector->name);
6204                         }
6205                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6206                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6207                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6208                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6209
6210                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6211                                         max_supported_bw_in_kbps > 0 &&
6212                                         dsc_max_supported_bw_in_kbps > 0)
6213                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6214                                                 dsc_caps,
6215                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6216                                                 max_dsc_target_bpp_limit_override,
6217                                                 dsc_max_supported_bw_in_kbps,
6218                                                 &stream->timing,
6219                                                 &stream->timing.dsc_cfg)) {
6220                                         stream->timing.flags.DSC = 1;
6221                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6222                                                                          __func__, drm_connector->name);
6223                                 }
6224                 }
6225         }
6226
6227         /* Overwrite the stream flag if DSC is enabled through debugfs */
6228         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6229                 stream->timing.flags.DSC = 1;
6230
6231         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6232                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6233
6234         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6235                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6236
6237         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6238                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6239 }
6240 #endif /* CONFIG_DRM_AMD_DC_DCN */
6241
6242 /**
6243  * DOC: FreeSync Video
6244  *
6245  * When a userspace application wants to play a video, the content follows a
6246  * standard format definition that usually specifies the FPS for that format.
6247  * The list below illustrates some video formats and their
6248  * expected FPS:
6249  *
6250  * - TV/NTSC (23.976 FPS)
6251  * - Cinema (24 FPS)
6252  * - TV/PAL (25 FPS)
6253  * - TV/NTSC (29.97 FPS)
6254  * - TV/NTSC (30 FPS)
6255  * - Cinema HFR (48 FPS)
6256  * - TV/PAL (50 FPS)
6257  * - Commonly used (60 FPS)
6258  * - Multiples of 24 (48,72,96,120 FPS)
6259  *
6260  * The list of standard video formats is not huge and can be added to the
6261  * connector's modeset list beforehand. With that, userspace can leverage
6262  * FreeSync to extend the front porch in order to attain the target refresh
6263  * rate. Such a switch will happen seamlessly, without screen blanking or
6264  * reprogramming of the output in any other way. If the userspace requests a
6265  * modesetting change compatible with FreeSync modes that only differ in the
6266  * refresh rate, DC will skip the full update and avoid blink during the
6267  * transition. For example, the video player can change the modesetting from
6268  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6269  * causing any display blink. This same concept can be applied to a mode
6270  * setting change.
6271  */
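/*
 * Worked example (editorial sketch, values assumed): to present 24 FPS
 * content on a 60 Hz base mode with vtotal = 1125, the front porch is
 * stretched so that vtotal grows to roughly 1125 * 60 / 24 = 2812 lines.
 * Only the vertical blanking changes, so the switch needs no full modeset
 * and causes no blanking.
 */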
6272 static struct drm_display_mode *
6273 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6274                           bool use_probed_modes)
6275 {
6276         struct drm_display_mode *m, *m_pref = NULL;
6277         u16 current_refresh, highest_refresh;
6278         struct list_head *list_head = use_probed_modes ?
6279                                                     &aconnector->base.probed_modes :
6280                                                     &aconnector->base.modes;
6281
6282         if (aconnector->freesync_vid_base.clock != 0)
6283                 return &aconnector->freesync_vid_base;
6284
6285         /* Find the preferred mode */
6286         list_for_each_entry(m, list_head, head) {
6287                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6288                         m_pref = m;
6289                         break;
6290                 }
6291         }
6292
6293         if (!m_pref) {
6294                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6295                 m_pref = list_first_entry_or_null(
6296                         &aconnector->base.modes, struct drm_display_mode, head);
6297                 if (!m_pref) {
6298                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6299                         return NULL;
6300                 }
6301         }
6302
6303         highest_refresh = drm_mode_vrefresh(m_pref);
6304
6305         /*
6306          * Find the mode with highest refresh rate with same resolution.
6307          * For some monitors, preferred mode is not the mode with highest
6308          * supported refresh rate.
6309          */
6310         list_for_each_entry(m, list_head, head) {
6311                 current_refresh  = drm_mode_vrefresh(m);
6312
6313                 if (m->hdisplay == m_pref->hdisplay &&
6314                     m->vdisplay == m_pref->vdisplay &&
6315                     highest_refresh < current_refresh) {
6316                         highest_refresh = current_refresh;
6317                         m_pref = m;
6318                 }
6319         }
6320
6321         aconnector->freesync_vid_base = *m_pref;
6322         return m_pref;
6323 }
6324
6325 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6326                                    struct amdgpu_dm_connector *aconnector)
6327 {
6328         struct drm_display_mode *high_mode;
6329         int timing_diff;
6330
6331         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6332         if (!high_mode || !mode)
6333                 return false;
6334
6335         timing_diff = high_mode->vtotal - mode->vtotal;
6336
6337         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6338             high_mode->hdisplay != mode->hdisplay ||
6339             high_mode->vdisplay != mode->vdisplay ||
6340             high_mode->hsync_start != mode->hsync_start ||
6341             high_mode->hsync_end != mode->hsync_end ||
6342             high_mode->htotal != mode->htotal ||
6343             high_mode->hskew != mode->hskew ||
6344             high_mode->vscan != mode->vscan ||
6345             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6346             high_mode->vsync_end - mode->vsync_end != timing_diff)
6347                 return false;
6348         else
6349                 return true;
6350 }
6351
6352 static struct dc_stream_state *
6353 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6354                        const struct drm_display_mode *drm_mode,
6355                        const struct dm_connector_state *dm_state,
6356                        const struct dc_stream_state *old_stream,
6357                        int requested_bpc)
6358 {
6359         struct drm_display_mode *preferred_mode = NULL;
6360         struct drm_connector *drm_connector;
6361         const struct drm_connector_state *con_state =
6362                 dm_state ? &dm_state->base : NULL;
6363         struct dc_stream_state *stream = NULL;
6364         struct drm_display_mode mode = *drm_mode;
6365         struct drm_display_mode saved_mode;
6366         struct drm_display_mode *freesync_mode = NULL;
6367         bool native_mode_found = false;
6368         bool recalculate_timing = false;
6369         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6370         int mode_refresh;
6371         int preferred_refresh = 0;
6372 #if defined(CONFIG_DRM_AMD_DC_DCN)
6373         struct dsc_dec_dpcd_caps dsc_caps;
6374 #endif
6375         struct dc_sink *sink = NULL;
6376
6377         memset(&saved_mode, 0, sizeof(saved_mode));
6378
6379         if (!aconnector) {
6380                 DRM_ERROR("aconnector is NULL!\n");
6381                 return stream;
6382         }
6383
6384         drm_connector = &aconnector->base;
6385
6386         if (!aconnector->dc_sink) {
6387                 sink = create_fake_sink(aconnector);
6388                 if (!sink)
6389                         return stream;
6390         } else {
6391                 sink = aconnector->dc_sink;
6392                 dc_sink_retain(sink);
6393         }
6394
6395         stream = dc_create_stream_for_sink(sink);
6396
6397         if (!stream) {
6398                 DRM_ERROR("Failed to create stream for sink!\n");
6399                 goto finish;
6400         }
6401
6402         stream->dm_stream_context = aconnector;
6403
6404         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6405                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6406
6407         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6408                 /* Search for preferred mode */
6409                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6410                         native_mode_found = true;
6411                         break;
6412                 }
6413         }
6414         if (!native_mode_found)
6415                 preferred_mode = list_first_entry_or_null(
6416                                 &aconnector->base.modes,
6417                                 struct drm_display_mode,
6418                                 head);
6419
6420         mode_refresh = drm_mode_vrefresh(&mode);
6421
6422         if (!preferred_mode) {
6423                 /*
6424                  * This may not be an error: the use case is when we have no
6425                  * usermode calls to reset and set mode upon hotplug. In this
6426                  * case, we call set mode ourselves to restore the previous
6427                  * mode, and the mode list may not have been filled in yet.
6428                  */
6429                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6430         } else {
6431                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6432                 if (recalculate_timing) {
6433                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6434                         saved_mode = mode;
6435                         mode = *freesync_mode;
6436                 } else {
6437                         decide_crtc_timing_for_drm_display_mode(
6438                                 &mode, preferred_mode, scale);
6439
6440                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6441                 }
6442         }
6443
6444         if (recalculate_timing)
6445                 drm_mode_set_crtcinfo(&saved_mode, 0);
6446         else if (!dm_state)
6447                 drm_mode_set_crtcinfo(&mode, 0);
6448
6449         /*
6450          * If scaling is enabled and the refresh rate didn't change,
6451          * we copy the vic and polarities of the old timings.
6452          */
6453         if (!scale || mode_refresh != preferred_refresh)
6454                 fill_stream_properties_from_drm_display_mode(
6455                         stream, &mode, &aconnector->base, con_state, NULL,
6456                         requested_bpc);
6457         else
6458                 fill_stream_properties_from_drm_display_mode(
6459                         stream, &mode, &aconnector->base, con_state, old_stream,
6460                         requested_bpc);
6461
6462 #if defined(CONFIG_DRM_AMD_DC_DCN)
6463         /* SST DSC determination policy */
6464         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6465         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6466                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6467 #endif
6468
6469         update_stream_scaling_settings(&mode, dm_state, stream);
6470
6471         fill_audio_info(
6472                 &stream->audio_info,
6473                 drm_connector,
6474                 sink);
6475
6476         update_stream_signal(stream, sink);
6477
6478         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6479                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6480
6481         if (stream->link->psr_settings.psr_feature_enabled) {
6482                 /*
6483                  * Decide whether the stream supports VSC SDP colorimetry
6484                  * before building the VSC info packet.
6485                  */
6486                 stream->use_vsc_sdp_for_colorimetry = false;
6487                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6488                         stream->use_vsc_sdp_for_colorimetry =
6489                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6490                 } else {
6491                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6492                                 stream->use_vsc_sdp_for_colorimetry = true;
6493                 }
6494                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6495                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6497         }
6498 finish:
6499         dc_sink_release(sink);
6500
6501         return stream;
6502 }
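/*
 * Note (editorial): create_fake_sink() returns a sink holding one
 * reference and dc_sink_retain() adds one on the existing-sink path, so
 * the single dc_sink_release() at "finish" balances both paths whether or
 * not stream creation succeeded.
 */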
6503
6504 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6505 {
6506         drm_crtc_cleanup(crtc);
6507         kfree(crtc);
6508 }
6509
6510 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6511                                   struct drm_crtc_state *state)
6512 {
6513         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6514
6515         /* TODO: destroy dc_stream once the stream object is flattened */
6516         if (cur->stream)
6517                 dc_stream_release(cur->stream);
6518
6520         __drm_atomic_helper_crtc_destroy_state(state);
6521
6523         kfree(state);
6524 }
6525
6526 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6527 {
6528         struct dm_crtc_state *state;
6529
6530         if (crtc->state)
6531                 dm_crtc_destroy_state(crtc, crtc->state);
6532
6533         state = kzalloc(sizeof(*state), GFP_KERNEL);
6534         if (WARN_ON(!state))
6535                 return;
6536
6537         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6538 }
6539
6540 static struct drm_crtc_state *
6541 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6542 {
6543         struct dm_crtc_state *state, *cur;
6544
6545         if (WARN_ON(!crtc->state))
6546                 return NULL;
6547
6548         cur = to_dm_crtc_state(crtc->state);
6549
6550         state = kzalloc(sizeof(*state), GFP_KERNEL);
6551         if (!state)
6552                 return NULL;
6553
6554         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6555
6556         if (cur->stream) {
6557                 state->stream = cur->stream;
6558                 dc_stream_retain(state->stream);
6559         }
6560
6561         state->active_planes = cur->active_planes;
6562         state->vrr_infopacket = cur->vrr_infopacket;
6563         state->abm_level = cur->abm_level;
6564         state->vrr_supported = cur->vrr_supported;
6565         state->freesync_config = cur->freesync_config;
6566         state->cm_has_degamma = cur->cm_has_degamma;
6567         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6568         state->force_dpms_off = cur->force_dpms_off;
6569         /* TODO: duplicate dc_stream after the stream object is flattened */
6570
6571         return &state->base;
6572 }
6573
6574 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6575 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6576 {
6577         crtc_debugfs_init(crtc);
6578
6579         return 0;
6580 }
6581 #endif
6582
6583 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6584 {
6585         enum dc_irq_source irq_source;
6586         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6587         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6588         int rc;
6589
6590         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6591
6592         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6593
6594         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6595                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6596         return rc;
6597 }
6598
6599 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6600 {
6601         enum dc_irq_source irq_source;
6602         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6603         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6604         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6605 #if defined(CONFIG_DRM_AMD_DC_DCN)
6606         struct amdgpu_display_manager *dm = &adev->dm;
6607         struct vblank_control_work *work;
6608 #endif
6609         int rc = 0;
6610
6611         if (enable) {
6612                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6613                 if (amdgpu_dm_vrr_active(acrtc_state))
6614                         rc = dm_set_vupdate_irq(crtc, true);
6615         } else {
6616                 /* vblank irq off -> vupdate irq off */
6617                 rc = dm_set_vupdate_irq(crtc, false);
6618         }
6619
6620         if (rc)
6621                 return rc;
6622
6623         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6624
6625         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6626                 return -EBUSY;
6627
6628         if (amdgpu_in_reset(adev))
6629                 return 0;
6630
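        /*
         * The remaining DC work for this transition cannot be done here:
         * we may be called from atomic context (hence GFP_ATOMIC below),
         * so it is deferred to vblank_control_worker() on the dedicated
         * vblank control workqueue.
         */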
6631 #if defined(CONFIG_DRM_AMD_DC_DCN)
6632         if (dm->vblank_control_workqueue) {
6633                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6634                 if (!work)
6635                         return -ENOMEM;
6636
6637                 INIT_WORK(&work->work, vblank_control_worker);
6638                 work->dm = dm;
6639                 work->acrtc = acrtc;
6640                 work->enable = enable;
6641
6642                 if (acrtc_state->stream) {
6643                         dc_stream_retain(acrtc_state->stream);
6644                         work->stream = acrtc_state->stream;
6645                 }
6646
6647                 queue_work(dm->vblank_control_workqueue, &work->work);
6648         }
6649 #endif
6650
6651         return 0;
6652 }
6653
6654 static int dm_enable_vblank(struct drm_crtc *crtc)
6655 {
6656         return dm_set_vblank(crtc, true);
6657 }
6658
6659 static void dm_disable_vblank(struct drm_crtc *crtc)
6660 {
6661         dm_set_vblank(crtc, false);
6662 }
6663
6664 /* Only the options currently available for the driver are implemented */
6665 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6666         .reset = dm_crtc_reset_state,
6667         .destroy = amdgpu_dm_crtc_destroy,
6668         .set_config = drm_atomic_helper_set_config,
6669         .page_flip = drm_atomic_helper_page_flip,
6670         .atomic_duplicate_state = dm_crtc_duplicate_state,
6671         .atomic_destroy_state = dm_crtc_destroy_state,
6672         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6673         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6674         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6675         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6676         .enable_vblank = dm_enable_vblank,
6677         .disable_vblank = dm_disable_vblank,
6678         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6679 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6680         .late_register = amdgpu_dm_crtc_late_register,
6681 #endif
6682 };
6683
6684 static enum drm_connector_status
6685 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6686 {
6687         bool connected;
6688         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6689
6690         /*
6691          * Notes:
6692          * 1. This interface is NOT called in the context of the HPD irq.
6693          * 2. This interface *is called* in the context of a user-mode ioctl,
6694          * which makes it a bad place for *any* MST-related activity.
6695          */
6696
6697         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6698             !aconnector->fake_enable)
6699                 connected = (aconnector->dc_sink != NULL);
6700         else
6701                 connected = (aconnector->base.force == DRM_FORCE_ON);
6702
6703         update_subconnector_property(aconnector);
6704
6705         return (connected ? connector_status_connected :
6706                         connector_status_disconnected);
6707 }
6708
6709 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6710                                             struct drm_connector_state *connector_state,
6711                                             struct drm_property *property,
6712                                             uint64_t val)
6713 {
6714         struct drm_device *dev = connector->dev;
6715         struct amdgpu_device *adev = drm_to_adev(dev);
6716         struct dm_connector_state *dm_old_state =
6717                 to_dm_connector_state(connector->state);
6718         struct dm_connector_state *dm_new_state =
6719                 to_dm_connector_state(connector_state);
6720
6721         int ret = -EINVAL;
6722
6723         if (property == dev->mode_config.scaling_mode_property) {
6724                 enum amdgpu_rmx_type rmx_type;
6725
6726                 switch (val) {
6727                 case DRM_MODE_SCALE_CENTER:
6728                         rmx_type = RMX_CENTER;
6729                         break;
6730                 case DRM_MODE_SCALE_ASPECT:
6731                         rmx_type = RMX_ASPECT;
6732                         break;
6733                 case DRM_MODE_SCALE_FULLSCREEN:
6734                         rmx_type = RMX_FULL;
6735                         break;
6736                 case DRM_MODE_SCALE_NONE:
6737                 default:
6738                         rmx_type = RMX_OFF;
6739                         break;
6740                 }
6741
6742                 if (dm_old_state->scaling == rmx_type)
6743                         return 0;
6744
6745                 dm_new_state->scaling = rmx_type;
6746                 ret = 0;
6747         } else if (property == adev->mode_info.underscan_hborder_property) {
6748                 dm_new_state->underscan_hborder = val;
6749                 ret = 0;
6750         } else if (property == adev->mode_info.underscan_vborder_property) {
6751                 dm_new_state->underscan_vborder = val;
6752                 ret = 0;
6753         } else if (property == adev->mode_info.underscan_property) {
6754                 dm_new_state->underscan_enable = val;
6755                 ret = 0;
6756         } else if (property == adev->mode_info.abm_level_property) {
6757                 dm_new_state->abm_level = val;
6758                 ret = 0;
6759         }
6760
6761         return ret;
6762 }
6763
6764 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6765                                             const struct drm_connector_state *state,
6766                                             struct drm_property *property,
6767                                             uint64_t *val)
6768 {
6769         struct drm_device *dev = connector->dev;
6770         struct amdgpu_device *adev = drm_to_adev(dev);
6771         struct dm_connector_state *dm_state =
6772                 to_dm_connector_state(state);
6773         int ret = -EINVAL;
6774
6775         if (property == dev->mode_config.scaling_mode_property) {
6776                 switch (dm_state->scaling) {
6777                 case RMX_CENTER:
6778                         *val = DRM_MODE_SCALE_CENTER;
6779                         break;
6780                 case RMX_ASPECT:
6781                         *val = DRM_MODE_SCALE_ASPECT;
6782                         break;
6783                 case RMX_FULL:
6784                         *val = DRM_MODE_SCALE_FULLSCREEN;
6785                         break;
6786                 case RMX_OFF:
6787                 default:
6788                         *val = DRM_MODE_SCALE_NONE;
6789                         break;
6790                 }
6791                 ret = 0;
6792         } else if (property == adev->mode_info.underscan_hborder_property) {
6793                 *val = dm_state->underscan_hborder;
6794                 ret = 0;
6795         } else if (property == adev->mode_info.underscan_vborder_property) {
6796                 *val = dm_state->underscan_vborder;
6797                 ret = 0;
6798         } else if (property == adev->mode_info.underscan_property) {
6799                 *val = dm_state->underscan_enable;
6800                 ret = 0;
6801         } else if (property == adev->mode_info.abm_level_property) {
6802                 *val = dm_state->abm_level;
6803                 ret = 0;
6804         }
6805
6806         return ret;
6807 }
6808
6809 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6810 {
6811         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6812
6813         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6814 }
6815
6816 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6817 {
6818         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6819         const struct dc_link *link = aconnector->dc_link;
6820         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6821         struct amdgpu_display_manager *dm = &adev->dm;
6822         int i;
6823
6824         /*
6825          * Call only if mst_mgr was initialized before, since it's not done
6826          * for all connector types.
6827          */
6828         if (aconnector->mst_mgr.dev)
6829                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6830
6831 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6832         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6833         for (i = 0; i < dm->num_of_edps; i++) {
6834                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6835                         backlight_device_unregister(dm->backlight_dev[i]);
6836                         dm->backlight_dev[i] = NULL;
6837                 }
6838         }
6839 #endif
6840
6841         if (aconnector->dc_em_sink)
6842                 dc_sink_release(aconnector->dc_em_sink);
6843         aconnector->dc_em_sink = NULL;
6844         if (aconnector->dc_sink)
6845                 dc_sink_release(aconnector->dc_sink);
6846         aconnector->dc_sink = NULL;
6847
6848         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6849         drm_connector_unregister(connector);
6850         drm_connector_cleanup(connector);
6851         if (aconnector->i2c) {
6852                 i2c_del_adapter(&aconnector->i2c->base);
6853                 kfree(aconnector->i2c);
6854         }
6855         kfree(aconnector->dm_dp_aux.aux.name);
6856
6857         kfree(connector);
6858 }
6859
6860 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6861 {
6862         struct dm_connector_state *state =
6863                 to_dm_connector_state(connector->state);
6864
6865         if (connector->state)
6866                 __drm_atomic_helper_connector_destroy_state(connector->state);
6867
6868         kfree(state);
6869
6870         state = kzalloc(sizeof(*state), GFP_KERNEL);
6871
6872         if (state) {
6873                 state->scaling = RMX_OFF;
6874                 state->underscan_enable = false;
6875                 state->underscan_hborder = 0;
6876                 state->underscan_vborder = 0;
6877                 state->base.max_requested_bpc = 8;
6878                 state->vcpi_slots = 0;
6879                 state->pbn = 0;
6880                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6881                         state->abm_level = amdgpu_dm_abm_level;
6882
6883                 __drm_atomic_helper_connector_reset(connector, &state->base);
6884         }
6885 }
6886
6887 struct drm_connector_state *
6888 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6889 {
6890         struct dm_connector_state *state =
6891                 to_dm_connector_state(connector->state);
6892
6893         struct dm_connector_state *new_state =
6894                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6895
6896         if (!new_state)
6897                 return NULL;
6898
6899         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6900
6901         new_state->freesync_capable = state->freesync_capable;
6902         new_state->abm_level = state->abm_level;
6903         new_state->scaling = state->scaling;
6904         new_state->underscan_enable = state->underscan_enable;
6905         new_state->underscan_hborder = state->underscan_hborder;
6906         new_state->underscan_vborder = state->underscan_vborder;
6907         new_state->vcpi_slots = state->vcpi_slots;
6908         new_state->pbn = state->pbn;
6909         return &new_state->base;
6910 }
6911
6912 static int
6913 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6914 {
6915         struct amdgpu_dm_connector *amdgpu_dm_connector =
6916                 to_amdgpu_dm_connector(connector);
6917         int r;
6918
6919         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6920             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6921                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6922                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6923                 if (r)
6924                         return r;
6925         }
6926
6927 #if defined(CONFIG_DEBUG_FS)
6928         connector_debugfs_init(amdgpu_dm_connector);
6929 #endif
6930
6931         return 0;
6932 }
6933
6934 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6935         .reset = amdgpu_dm_connector_funcs_reset,
6936         .detect = amdgpu_dm_connector_detect,
6937         .fill_modes = drm_helper_probe_single_connector_modes,
6938         .destroy = amdgpu_dm_connector_destroy,
6939         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6940         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6941         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6942         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6943         .late_register = amdgpu_dm_connector_late_register,
6944         .early_unregister = amdgpu_dm_connector_unregister
6945 };
6946
6947 static int get_modes(struct drm_connector *connector)
6948 {
6949         return amdgpu_dm_connector_get_modes(connector);
6950 }
6951
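/*
 * Create an emulated (virtual-signal) DC sink from the EDID blob that
 * userspace forced on this connector, so the rest of DM can treat the
 * forced connector as if a real sink were attached.
 */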
6952 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6953 {
6954         struct dc_sink_init_data init_params = {
6955                         .link = aconnector->dc_link,
6956                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6957         };
6958         struct edid *edid;
6959
6960         if (!aconnector->base.edid_blob_ptr) {
6961                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6962                                 aconnector->base.name);
6963
6964                 aconnector->base.force = DRM_FORCE_OFF;
6965                 aconnector->base.override_edid = false;
6966                 return;
6967         }
6968
6969         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6970
6971         aconnector->edid = edid;
6972
6973         aconnector->dc_em_sink = dc_link_add_remote_sink(
6974                 aconnector->dc_link,
6975                 (uint8_t *)edid,
6976                 (edid->extensions + 1) * EDID_LENGTH,
6977                 &init_params);
6978
6979         if (aconnector->base.force == DRM_FORCE_ON) {
6980                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6981                 aconnector->dc_link->local_sink :
6982                 aconnector->dc_em_sink;
6983                 dc_sink_retain(aconnector->dc_sink);
6984         }
6985 }
6986
6987 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6988 {
6989         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6990
6991         /*
6992          * In case of a headless boot with force-on for a DP-managed connector,
6993          * these settings have to be != 0 to get an initial modeset.
6994          */
6995         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6996                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6997                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6998         }
6999
7000
7001         aconnector->base.override_edid = true;
7002         create_eml_sink(aconnector);
7003 }
7004
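/*
 * Build a dc_stream_state for the requested mode and validate it against
 * DC. On validation failure, retry with progressively lower color depth
 * (down to 6 bpc), and as a last resort retry once with YCbCr420 forced.
 */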
7005 static struct dc_stream_state *
7006 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7007                                 const struct drm_display_mode *drm_mode,
7008                                 const struct dm_connector_state *dm_state,
7009                                 const struct dc_stream_state *old_stream)
7010 {
7011         struct drm_connector *connector = &aconnector->base;
7012         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7013         struct dc_stream_state *stream;
7014         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7015         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7016         enum dc_status dc_result = DC_OK;
7017
7018         do {
7019                 stream = create_stream_for_sink(aconnector, drm_mode,
7020                                                 dm_state, old_stream,
7021                                                 requested_bpc);
7022                 if (stream == NULL) {
7023                         DRM_ERROR("Failed to create stream for sink!\n");
7024                         break;
7025                 }
7026
7027                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7028
7029                 if (dc_result != DC_OK) {
7030                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7031                                       drm_mode->hdisplay,
7032                                       drm_mode->vdisplay,
7033                                       drm_mode->clock,
7034                                       dc_result,
7035                                       dc_status_to_str(dc_result));
7036
7037                         dc_stream_release(stream);
7038                         stream = NULL;
7039                         requested_bpc -= 2; /* lower bpc to retry validation */
7040                 }
7041
7042         } while (stream == NULL && requested_bpc >= 6);
7043
7044         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7045                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7046
7047                 aconnector->force_yuv420_output = true;
7048                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7049                                                 dm_state, old_stream);
7050                 aconnector->force_yuv420_output = false;
7051         }
7052
7053         return stream;
7054 }
7055
7056 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7057                                    struct drm_display_mode *mode)
7058 {
7059         int result = MODE_ERROR;
7060         struct dc_sink *dc_sink;
7061         /* TODO: Unhardcode stream count */
7062         struct dc_stream_state *stream;
7063         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7064
7065         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7066                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7067                 return result;
7068
7069         /*
7070          * Only run this the first time mode_valid is called, to initialize
7071          * EDID management.
7072          */
7073         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7074                 !aconnector->dc_em_sink)
7075                 handle_edid_mgmt(aconnector);
7076
7077         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7078
7079         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7080                                 aconnector->base.force != DRM_FORCE_ON) {
7081                 DRM_ERROR("dc_sink is NULL!\n");
7082                 goto fail;
7083         }
7084
7085         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7086         if (stream) {
7087                 dc_stream_release(stream);
7088                 result = MODE_OK;
7089         }
7090
7091 fail:
7092         /* TODO: error handling */
7093         return result;
7094 }
7095
7096 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7097                                 struct dc_info_packet *out)
7098 {
7099         struct hdmi_drm_infoframe frame;
7100         unsigned char buf[30]; /* 26 + 4 */
7101         ssize_t len;
7102         int ret, i;
7103
7104         memset(out, 0, sizeof(*out));
7105
7106         if (!state->hdr_output_metadata)
7107                 return 0;
7108
7109         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7110         if (ret)
7111                 return ret;
7112
7113         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7114         if (len < 0)
7115                 return (int)len;
7116
7117         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7118         if (len != 30)
7119                 return -EINVAL;
7120
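        /*
         * buf[] now holds the packed HDMI DRM (Dynamic Range and
         * Mastering) infoframe: a 4 byte header (type, version, length,
         * checksum) followed by the 26 byte static metadata payload.
         */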
7121         /* Prepare the infopacket for DC. */
7122         switch (state->connector->connector_type) {
7123         case DRM_MODE_CONNECTOR_HDMIA:
7124                 out->hb0 = 0x87; /* type */
7125                 out->hb1 = 0x01; /* version */
7126                 out->hb2 = 0x1A; /* length */
7127                 out->sb[0] = buf[3]; /* checksum */
7128                 i = 1;
7129                 break;
7130
7131         case DRM_MODE_CONNECTOR_DisplayPort:
7132         case DRM_MODE_CONNECTOR_eDP:
7133                 out->hb0 = 0x00; /* sdp id, zero */
7134                 out->hb1 = 0x87; /* type */
7135                 out->hb2 = 0x1D; /* payload len - 1 */
7136                 out->hb3 = (0x13 << 2); /* sdp version */
7137                 out->sb[0] = 0x01; /* version */
7138                 out->sb[1] = 0x1A; /* length */
7139                 i = 2;
7140                 break;
7141
7142         default:
7143                 return -EINVAL;
7144         }
7145
7146         memcpy(&out->sb[i], &buf[4], 26);
7147         out->valid = true;
7148
7149         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7150                        sizeof(out->sb), false);
7151
7152         return 0;
7153 }
7154
7155 static int
7156 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7157                                  struct drm_atomic_state *state)
7158 {
7159         struct drm_connector_state *new_con_state =
7160                 drm_atomic_get_new_connector_state(state, conn);
7161         struct drm_connector_state *old_con_state =
7162                 drm_atomic_get_old_connector_state(state, conn);
7163         struct drm_crtc *crtc = new_con_state->crtc;
7164         struct drm_crtc_state *new_crtc_state;
7165         int ret;
7166
7167         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7168
7169         if (!crtc)
7170                 return 0;
7171
7172         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7173                 struct dc_info_packet hdr_infopacket;
7174
7175                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7176                 if (ret)
7177                         return ret;
7178
7179                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7180                 if (IS_ERR(new_crtc_state))
7181                         return PTR_ERR(new_crtc_state);
7182
7183                 /*
7184                  * DC considers the stream backends changed if the
7185                  * static metadata changes. Forcing the modeset also
7186                  * gives a simple way for userspace to switch from
7187                  * 8bpc to 10bpc when setting the metadata to enter
7188                  * or exit HDR.
7189                  *
7190                  * Changing the static metadata after it's been
7191                  * set is permissible, however. So only force a
7192                  * modeset if we're entering or exiting HDR.
7193                  */
7194                 new_crtc_state->mode_changed =
7195                         !old_con_state->hdr_output_metadata ||
7196                         !new_con_state->hdr_output_metadata;
7197         }
7198
7199         return 0;
7200 }
7201
7202 static const struct drm_connector_helper_funcs
7203 amdgpu_dm_connector_helper_funcs = {
7204         /*
7205          * If hotplugging a second, bigger display in FB console mode, the bigger
7206          * resolution modes will be filtered by drm_mode_validate_size() and will
7207          * be missing after the user starts lightdm. So we need to rebuild the
7208          * mode list in the get_modes callback, not just return the mode count.
7209          */
7210         .get_modes = get_modes,
7211         .mode_valid = amdgpu_dm_connector_mode_valid,
7212         .atomic_check = amdgpu_dm_connector_atomic_check,
7213 };
7214
7215 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7216 {
7217 }
7218
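/*
 * Count the non-cursor planes that will be enabled on the CRTC once this
 * atomic state is applied. Planes not touched by the update are assumed
 * to keep their previously validated (enabled) state.
 */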
7219 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7220 {
7221         struct drm_atomic_state *state = new_crtc_state->state;
7222         struct drm_plane *plane;
7223         int num_active = 0;
7224
7225         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7226                 struct drm_plane_state *new_plane_state;
7227
7228                 /* Cursor planes are "fake". */
7229                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7230                         continue;
7231
7232                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7233
7234                 if (!new_plane_state) {
7235                         /*
7236                          * The plane is enabled on the CRTC and hasn't changed
7237                          * state. This means that it previously passed
7238                          * validation and is therefore enabled.
7239                          */
7240                         num_active += 1;
7241                         continue;
7242                 }
7243
7244                 /* We need a framebuffer to be considered enabled. */
7245                 num_active += (new_plane_state->fb != NULL);
7246         }
7247
7248         return num_active;
7249 }
7250
7251 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7252                                          struct drm_crtc_state *new_crtc_state)
7253 {
7254         struct dm_crtc_state *dm_new_crtc_state =
7255                 to_dm_crtc_state(new_crtc_state);
7256
7257         dm_new_crtc_state->active_planes = 0;
7258
7259         if (!dm_new_crtc_state->stream)
7260                 return;
7261
7262         dm_new_crtc_state->active_planes =
7263                 count_crtc_active_planes(new_crtc_state);
7264 }
7265
7266 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7267                                        struct drm_atomic_state *state)
7268 {
7269         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7270                                                                           crtc);
7271         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7272         struct dc *dc = adev->dm.dc;
7273         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7274         int ret = -EINVAL;
7275
7276         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7277
7278         dm_update_crtc_active_planes(crtc, crtc_state);
7279
7280         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7281                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7282                 return ret;
7283         }
7284
7285         /*
7286          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7287          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7288          * planes are disabled, which is not supported by the hardware. And there is legacy
7289          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7290          */
7291         if (crtc_state->enable &&
7292             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7293                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7294                 return -EINVAL;
7295         }
7296
7297         /* In some use cases, like reset, no stream is attached */
7298         if (!dm_crtc_state->stream)
7299                 return 0;
7300
7301         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7302                 return 0;
7303
7304         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7305         return ret;
7306 }
7307
7308 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7309                                       const struct drm_display_mode *mode,
7310                                       struct drm_display_mode *adjusted_mode)
7311 {
7312         return true;
7313 }
7314
7315 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7316         .disable = dm_crtc_helper_disable,
7317         .atomic_check = dm_crtc_helper_atomic_check,
7318         .mode_fixup = dm_crtc_helper_mode_fixup,
7319         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7320 };
7321
7322 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7323 {
7324
7325 }
7326
7327 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7328 {
7329         switch (display_color_depth) {
7330         case COLOR_DEPTH_666:
7331                 return 6;
7332         case COLOR_DEPTH_888:
7333                 return 8;
7334         case COLOR_DEPTH_101010:
7335                 return 10;
7336         case COLOR_DEPTH_121212:
7337                 return 12;
7338         case COLOR_DEPTH_141414:
7339                 return 14;
7340         case COLOR_DEPTH_161616:
7341                 return 16;
7342         default:
7343                 break;
7344         }
7345         return 0;
7346 }
7347
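/*
 * For MST connectors, compute the payload bandwidth (PBN) required by
 * the adjusted mode at the negotiated color depth, and reserve the
 * matching number of VCPI time slots in the atomic MST state.
 */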
7348 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7349                                           struct drm_crtc_state *crtc_state,
7350                                           struct drm_connector_state *conn_state)
7351 {
7352         struct drm_atomic_state *state = crtc_state->state;
7353         struct drm_connector *connector = conn_state->connector;
7354         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7355         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7356         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7357         struct drm_dp_mst_topology_mgr *mst_mgr;
7358         struct drm_dp_mst_port *mst_port;
7359         enum dc_color_depth color_depth;
7360         int clock, bpp = 0;
7361         bool is_y420 = false;
7362
7363         if (!aconnector->port || !aconnector->dc_sink)
7364                 return 0;
7365
7366         mst_port = aconnector->port;
7367         mst_mgr = &aconnector->mst_port->mst_mgr;
7368
7369         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7370                 return 0;
7371
7372         if (!state->duplicated) {
7373                 int max_bpc = conn_state->max_requested_bpc;
7374                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7375                                 aconnector->force_yuv420_output;
7376                 color_depth = convert_color_depth_from_display_info(connector,
7377                                                                     is_y420,
7378                                                                     max_bpc);
7379                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7380                 clock = adjusted_mode->clock;
7381                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7382         }
7383         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7384                                                                            mst_mgr,
7385                                                                            mst_port,
7386                                                                            dm_new_connector_state->pbn,
7387                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7388         if (dm_new_connector_state->vcpi_slots < 0) {
7389                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7390                 return dm_new_connector_state->vcpi_slots;
7391         }
7392         return 0;
7393 }
7394
7395 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7396         .disable = dm_encoder_helper_disable,
7397         .atomic_check = dm_encoder_helper_atomic_check
7398 };
7399
7400 #if defined(CONFIG_DRM_AMD_DC_DCN)
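/*
 * Write the PBN/slot counts computed by the DSC fairness pass (vars[])
 * back into the connector states, and enable or disable DSC on each MST
 * port to match the final per-stream timing.flags.DSC decision.
 */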
7401 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7402                                             struct dc_state *dc_state,
7403                                             struct dsc_mst_fairness_vars *vars)
7404 {
7405         struct dc_stream_state *stream = NULL;
7406         struct drm_connector *connector;
7407         struct drm_connector_state *new_con_state;
7408         struct amdgpu_dm_connector *aconnector;
7409         struct dm_connector_state *dm_conn_state;
7410         int i, j;
7411         int vcpi, pbn_div, pbn, slot_num = 0;
7412
7413         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7414
7415                 aconnector = to_amdgpu_dm_connector(connector);
7416
7417                 if (!aconnector->port)
7418                         continue;
7419
7420                 if (!new_con_state || !new_con_state->crtc)
7421                         continue;
7422
7423                 dm_conn_state = to_dm_connector_state(new_con_state);
7424
7425                 for (j = 0; j < dc_state->stream_count; j++) {
7426                         stream = dc_state->streams[j];
7427                         if (!stream)
7428                                 continue;
7429
7430                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7431                                 break;
7432
7433                         stream = NULL;
7434                 }
7435
7436                 if (!stream)
7437                         continue;
7438
7439                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7440                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7441                 for (j = 0; j < dc_state->stream_count; j++) {
7442                         if (vars[j].aconnector == aconnector) {
7443                                 pbn = vars[j].pbn;
7444                                 break;
7445                         }
7446                 }
7447
7448                 if (j == dc_state->stream_count)
7449                         continue;
7450
7451                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7452
7453                 if (stream->timing.flags.DSC != 1) {
7454                         dm_conn_state->pbn = pbn;
7455                         dm_conn_state->vcpi_slots = slot_num;
7456
7457                         drm_dp_mst_atomic_enable_dsc(state,
7458                                                      aconnector->port,
7459                                                      dm_conn_state->pbn,
7460                                                      0,
7461                                                      false);
7462                         continue;
7463                 }
7464
7465                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7466                                                     aconnector->port,
7467                                                     pbn, pbn_div,
7468                                                     true);
7469                 if (vcpi < 0)
7470                         return vcpi;
7471
7472                 dm_conn_state->pbn = pbn;
7473                 dm_conn_state->vcpi_slots = vcpi;
7474         }
7475         return 0;
7476 }
7477 #endif
7478
7479 static void dm_drm_plane_reset(struct drm_plane *plane)
7480 {
7481         struct dm_plane_state *amdgpu_state = NULL;
7482
7483         if (plane->state)
7484                 plane->funcs->atomic_destroy_state(plane, plane->state);
7485
7486         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7487         WARN_ON(amdgpu_state == NULL);
7488
7489         if (amdgpu_state)
7490                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7491 }
7492
7493 static struct drm_plane_state *
7494 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7495 {
7496         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7497
7498         old_dm_plane_state = to_dm_plane_state(plane->state);
7499         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7500         if (!dm_plane_state)
7501                 return NULL;
7502
7503         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7504
7505         if (old_dm_plane_state->dc_state) {
7506                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7507                 dc_plane_state_retain(dm_plane_state->dc_state);
7508         }
7509
7510         return &dm_plane_state->base;
7511 }
7512
7513 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7514                                 struct drm_plane_state *state)
7515 {
7516         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7517
7518         if (dm_plane_state->dc_state)
7519                 dc_plane_state_release(dm_plane_state->dc_state);
7520
7521         drm_atomic_helper_plane_destroy_state(plane, state);
7522 }
7523
7524 static const struct drm_plane_funcs dm_plane_funcs = {
7525         .update_plane   = drm_atomic_helper_update_plane,
7526         .disable_plane  = drm_atomic_helper_disable_plane,
7527         .destroy        = drm_primary_helper_destroy,
7528         .reset = dm_drm_plane_reset,
7529         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7530         .atomic_destroy_state = dm_drm_plane_destroy_state,
7531         .format_mod_supported = dm_plane_format_mod_supported,
7532 };
7533
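/*
 * Prepare a framebuffer BO for scanout: reserve it, pin it into a domain
 * the display hardware can scan out from (VRAM, or GTT where supported),
 * make sure it is bound in GART, and record the resulting GPU address in
 * the amdgpu_framebuffer.
 */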
7534 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7535                                       struct drm_plane_state *new_state)
7536 {
7537         struct amdgpu_framebuffer *afb;
7538         struct drm_gem_object *obj;
7539         struct amdgpu_device *adev;
7540         struct amdgpu_bo *rbo;
7541         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7542         struct list_head list;
7543         struct ttm_validate_buffer tv;
7544         struct ww_acquire_ctx ticket;
7545         uint32_t domain;
7546         int r;
7547
7548         if (!new_state->fb) {
7549                 DRM_DEBUG_KMS("No FB bound\n");
7550                 return 0;
7551         }
7552
7553         afb = to_amdgpu_framebuffer(new_state->fb);
7554         obj = new_state->fb->obj[0];
7555         rbo = gem_to_amdgpu_bo(obj);
7556         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7557         INIT_LIST_HEAD(&list);
7558
7559         tv.bo = &rbo->tbo;
7560         tv.num_shared = 1;
7561         list_add(&tv.head, &list);
7562
7563         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7564         if (r) {
7565                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7566                 return r;
7567         }
7568
7569         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7570                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7571         else
7572                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7573
7574         r = amdgpu_bo_pin(rbo, domain);
7575         if (unlikely(r != 0)) {
7576                 if (r != -ERESTARTSYS)
7577                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7578                 ttm_eu_backoff_reservation(&ticket, &list);
7579                 return r;
7580         }
7581
7582         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7583         if (unlikely(r != 0)) {
7584                 amdgpu_bo_unpin(rbo);
7585                 ttm_eu_backoff_reservation(&ticket, &list);
7586                 DRM_ERROR("%p bind failed\n", rbo);
7587                 return r;
7588         }
7589
7590         ttm_eu_backoff_reservation(&ticket, &list);
7591
7592         afb->address = amdgpu_bo_gpu_offset(rbo);
7593
7594         amdgpu_bo_ref(rbo);
7595
7596         /*
7597          * We don't do surface updates on planes that have been newly created,
7598          * but we also don't have the afb->address during atomic check.
7599          *
7600          * Fill in buffer attributes depending on the address here, but only on
7601          * newly created planes since they're not being used by DC yet and this
7602          * won't modify global state.
7603          */
7604         dm_plane_state_old = to_dm_plane_state(plane->state);
7605         dm_plane_state_new = to_dm_plane_state(new_state);
7606
7607         if (dm_plane_state_new->dc_state &&
7608             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7609                 struct dc_plane_state *plane_state =
7610                         dm_plane_state_new->dc_state;
7611                 bool force_disable_dcc = !plane_state->dcc.enable;
7612
7613                 fill_plane_buffer_attributes(
7614                         adev, afb, plane_state->format, plane_state->rotation,
7615                         afb->tiling_flags,
7616                         &plane_state->tiling_info, &plane_state->plane_size,
7617                         &plane_state->dcc, &plane_state->address,
7618                         afb->tmz_surface, force_disable_dcc);
7619         }
7620
7621         return 0;
7622 }
7623
7624 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7625                                        struct drm_plane_state *old_state)
7626 {
7627         struct amdgpu_bo *rbo;
7628         int r;
7629
7630         if (!old_state->fb)
7631                 return;
7632
7633         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7634         r = amdgpu_bo_reserve(rbo, false);
7635         if (unlikely(r)) {
7636                 DRM_ERROR("failed to reserve rbo before unpin\n");
7637                 return;
7638         }
7639
7640         amdgpu_bo_unpin(rbo);
7641         amdgpu_bo_unreserve(rbo);
7642         amdgpu_bo_unref(&rbo);
7643 }
7644
7645 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7646                                        struct drm_crtc_state *new_crtc_state)
7647 {
7648         struct drm_framebuffer *fb = state->fb;
7649         int min_downscale, max_upscale;
7650         int min_scale = 0;
7651         int max_scale = INT_MAX;
7652
7653         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7654         if (fb && state->crtc) {
7655                 /* Validate viewport to cover the case when only the position changes */
7656                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7657                         int viewport_width = state->crtc_w;
7658                         int viewport_height = state->crtc_h;
7659
7660                         if (state->crtc_x < 0)
7661                                 viewport_width += state->crtc_x;
7662                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7663                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7664
7665                         if (state->crtc_y < 0)
7666                                 viewport_height += state->crtc_y;
7667                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7668                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7669
7670                         if (viewport_width < 0 || viewport_height < 0) {
7671                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7672                                 return -EINVAL;
7673                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width because of pipe-split */
7674                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7675                                 return -EINVAL;
7676                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7677                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7678                                 return -EINVAL;
7679                         }
7680
7681                 }
7682
7683                 /* Get min/max allowed scaling factors from plane caps. */
7684                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7685                                              &min_downscale, &max_upscale);
7686                 /*
7687                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7688                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7689                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7690                  */
7691                 min_scale = (1000 << 16) / max_upscale;
7692                 max_scale = (1000 << 16) / min_downscale;
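                /*
                 * Worked example, assuming DC plane caps of
                 * max_upscale = 16000 (16x) and min_downscale = 250 (1/4x):
                 *   min_scale = (1000 << 16) / 16000 = 0x1000  (1/16 in 16.16)
                 *   max_scale = (1000 << 16) / 250   = 0x40000 (4x in 16.16)
                 */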
7693         }
7694
7695         return drm_atomic_helper_check_plane_state(
7696                 state, new_crtc_state, min_scale, max_scale, true, true);
7697 }
7698
7699 static int dm_plane_atomic_check(struct drm_plane *plane,
7700                                  struct drm_atomic_state *state)
7701 {
7702         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7703                                                                                  plane);
7704         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7705         struct dc *dc = adev->dm.dc;
7706         struct dm_plane_state *dm_plane_state;
7707         struct dc_scaling_info scaling_info;
7708         struct drm_crtc_state *new_crtc_state;
7709         int ret;
7710
7711         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7712
7713         dm_plane_state = to_dm_plane_state(new_plane_state);
7714
7715         if (!dm_plane_state->dc_state)
7716                 return 0;
7717
7718         new_crtc_state =
7719                 drm_atomic_get_new_crtc_state(state,
7720                                               new_plane_state->crtc);
7721         if (!new_crtc_state)
7722                 return -EINVAL;
7723
7724         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7725         if (ret)
7726                 return ret;
7727
7728         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7729         if (ret)
7730                 return ret;
7731
7732         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7733                 return 0;
7734
7735         return -EINVAL;
7736 }
7737
7738 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7739                                        struct drm_atomic_state *state)
7740 {
7741         /* Only support async updates on cursor planes. */
7742         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7743                 return -EINVAL;
7744
7745         return 0;
7746 }
7747
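/*
 * Async (cursor-only) update path: fold the new framebuffer and
 * position into the committed plane state in place and program the
 * cursor immediately, without a full atomic commit.
 */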
7748 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7749                                          struct drm_atomic_state *state)
7750 {
7751         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7752                                                                            plane);
7753         struct drm_plane_state *old_state =
7754                 drm_atomic_get_old_plane_state(state, plane);
7755
7756         trace_amdgpu_dm_atomic_update_cursor(new_state);
7757
7758         swap(plane->state->fb, new_state->fb);
7759
7760         plane->state->src_x = new_state->src_x;
7761         plane->state->src_y = new_state->src_y;
7762         plane->state->src_w = new_state->src_w;
7763         plane->state->src_h = new_state->src_h;
7764         plane->state->crtc_x = new_state->crtc_x;
7765         plane->state->crtc_y = new_state->crtc_y;
7766         plane->state->crtc_w = new_state->crtc_w;
7767         plane->state->crtc_h = new_state->crtc_h;
7768
7769         handle_cursor_update(plane, old_state);
7770 }
7771
7772 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7773         .prepare_fb = dm_plane_helper_prepare_fb,
7774         .cleanup_fb = dm_plane_helper_cleanup_fb,
7775         .atomic_check = dm_plane_atomic_check,
7776         .atomic_async_check = dm_plane_atomic_async_check,
7777         .atomic_async_update = dm_plane_atomic_async_update
7778 };
7779
7780 /*
7781  * TODO: these are currently initialized to rgb formats only.
7782  * For future use cases we should either initialize them dynamically based on
7783  * plane capabilities, or initialize this array to all formats, so the
7784  * internal drm check will succeed, and let DC implement the proper check.
7785  */
7786 static const uint32_t rgb_formats[] = {
7787         DRM_FORMAT_XRGB8888,
7788         DRM_FORMAT_ARGB8888,
7789         DRM_FORMAT_RGBA8888,
7790         DRM_FORMAT_XRGB2101010,
7791         DRM_FORMAT_XBGR2101010,
7792         DRM_FORMAT_ARGB2101010,
7793         DRM_FORMAT_ABGR2101010,
7794         DRM_FORMAT_XRGB16161616,
7795         DRM_FORMAT_XBGR16161616,
7796         DRM_FORMAT_ARGB16161616,
7797         DRM_FORMAT_ABGR16161616,
7798         DRM_FORMAT_XBGR8888,
7799         DRM_FORMAT_ABGR8888,
7800         DRM_FORMAT_RGB565,
7801 };
7802
7803 static const uint32_t overlay_formats[] = {
7804         DRM_FORMAT_XRGB8888,
7805         DRM_FORMAT_ARGB8888,
7806         DRM_FORMAT_RGBA8888,
7807         DRM_FORMAT_XBGR8888,
7808         DRM_FORMAT_ABGR8888,
7809         DRM_FORMAT_RGB565
7810 };
7811
7812 static const u32 cursor_formats[] = {
7813         DRM_FORMAT_ARGB8888
7814 };
7815
7816 static int get_plane_formats(const struct drm_plane *plane,
7817                              const struct dc_plane_cap *plane_cap,
7818                              uint32_t *formats, int max_formats)
7819 {
7820         int i, num_formats = 0;
7821
7822         /*
7823          * TODO: Query support for each group of formats directly from
7824          * DC plane caps. This will require adding more formats to the
7825          * caps list.
7826          */
7827
7828         switch (plane->type) {
7829         case DRM_PLANE_TYPE_PRIMARY:
7830                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7831                         if (num_formats >= max_formats)
7832                                 break;
7833
7834                         formats[num_formats++] = rgb_formats[i];
7835                 }
7836
7837                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7838                         formats[num_formats++] = DRM_FORMAT_NV12;
7839                 if (plane_cap && plane_cap->pixel_format_support.p010)
7840                         formats[num_formats++] = DRM_FORMAT_P010;
7841                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7842                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7843                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7844                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7845                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7846                 }
7847                 break;
7848
7849         case DRM_PLANE_TYPE_OVERLAY:
7850                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7851                         if (num_formats >= max_formats)
7852                                 break;
7853
7854                         formats[num_formats++] = overlay_formats[i];
7855                 }
7856                 break;
7857
7858         case DRM_PLANE_TYPE_CURSOR:
7859                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7860                         if (num_formats >= max_formats)
7861                                 break;
7862
7863                         formats[num_formats++] = cursor_formats[i];
7864                 }
7865                 break;
7866         }
7867
7868         return num_formats;
7869 }
7870
7871 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7872                                 struct drm_plane *plane,
7873                                 unsigned long possible_crtcs,
7874                                 const struct dc_plane_cap *plane_cap)
7875 {
7876         uint32_t formats[32];
7877         int num_formats;
7878         int res = -EPERM;
7879         unsigned int supported_rotations;
7880         uint64_t *modifiers = NULL;
7881
7882         num_formats = get_plane_formats(plane, plane_cap, formats,
7883                                         ARRAY_SIZE(formats));
7884
7885         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7886         if (res)
7887                 return res;
7888
7889         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7890                                        &dm_plane_funcs, formats, num_formats,
7891                                        modifiers, plane->type, NULL);
7892         kfree(modifiers);
7893         if (res)
7894                 return res;
7895
7896         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7897             plane_cap && plane_cap->per_pixel_alpha) {
7898                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7899                                           BIT(DRM_MODE_BLEND_PREMULTI);
7900
7901                 drm_plane_create_alpha_property(plane);
7902                 drm_plane_create_blend_mode_property(plane, blend_caps);
7903         }
7904
7905         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7906             plane_cap &&
7907             (plane_cap->pixel_format_support.nv12 ||
7908              plane_cap->pixel_format_support.p010)) {
7909                 /* This only affects YUV formats. */
7910                 drm_plane_create_color_properties(
7911                         plane,
7912                         BIT(DRM_COLOR_YCBCR_BT601) |
7913                         BIT(DRM_COLOR_YCBCR_BT709) |
7914                         BIT(DRM_COLOR_YCBCR_BT2020),
7915                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7916                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7917                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7918         }
7919
7920         supported_rotations =
7921                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7922                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7923
7924         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7925             plane->type != DRM_PLANE_TYPE_CURSOR)
7926                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7927                                                    supported_rotations);
7928
7929         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7930
7931         /* Create (reset) the plane state */
7932         if (plane->funcs->reset)
7933                 plane->funcs->reset(plane);
7934
7935         return 0;
7936 }
7937
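/*
 * Create an amdgpu CRTC on top of the given primary plane together with
 * a dedicated cursor plane, and hook up the DM CRTC funcs, color
 * management properties and cursor size limits.
 */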
7938 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7939                                struct drm_plane *plane,
7940                                uint32_t crtc_index)
7941 {
7942         struct amdgpu_crtc *acrtc = NULL;
7943         struct drm_plane *cursor_plane;
7944
7945         int res = -ENOMEM;
7946
7947         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7948         if (!cursor_plane)
7949                 goto fail;
7950
7951         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7952         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
7953
7954         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7955         if (!acrtc)
7956                 goto fail;
7957
7958         res = drm_crtc_init_with_planes(
7959                         dm->ddev,
7960                         &acrtc->base,
7961                         plane,
7962                         cursor_plane,
7963                         &amdgpu_dm_crtc_funcs, NULL);
7964
7965         if (res)
7966                 goto fail;
7967
7968         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7969
7970         /* Create (reset) the CRTC state */
7971         if (acrtc->base.funcs->reset)
7972                 acrtc->base.funcs->reset(&acrtc->base);
7973
7974         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7975         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7976
7977         acrtc->crtc_id = crtc_index;
7978         acrtc->base.enabled = false;
7979         acrtc->otg_inst = -1;
7980
7981         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7982         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7983                                    true, MAX_COLOR_LUT_ENTRIES);
7984         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7985
7986         return 0;
7987
7988 fail:
7989         kfree(acrtc);
7990         kfree(cursor_plane);
7991         return res;
7992 }
7993
7994
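     /* Map a DC signal type onto the corresponding DRM connector type. */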
7995 static int to_drm_connector_type(enum signal_type st)
7996 {
7997         switch (st) {
7998         case SIGNAL_TYPE_HDMI_TYPE_A:
7999                 return DRM_MODE_CONNECTOR_HDMIA;
8000         case SIGNAL_TYPE_EDP:
8001                 return DRM_MODE_CONNECTOR_eDP;
8002         case SIGNAL_TYPE_LVDS:
8003                 return DRM_MODE_CONNECTOR_LVDS;
8004         case SIGNAL_TYPE_RGB:
8005                 return DRM_MODE_CONNECTOR_VGA;
8006         case SIGNAL_TYPE_DISPLAY_PORT:
8007         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8008                 return DRM_MODE_CONNECTOR_DisplayPort;
8009         case SIGNAL_TYPE_DVI_DUAL_LINK:
8010         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8011                 return DRM_MODE_CONNECTOR_DVID;
8012         case SIGNAL_TYPE_VIRTUAL:
8013                 return DRM_MODE_CONNECTOR_VIRTUAL;
8014
8015         default:
8016                 return DRM_MODE_CONNECTOR_Unknown;
8017         }
8018 }
8019
8020 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8021 {
8022         struct drm_encoder *encoder;
8023
8024         /* There is only one encoder per connector */
8025         drm_connector_for_each_possible_encoder(connector, encoder)
8026                 return encoder;
8027
8028         return NULL;
8029 }
8030
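     /*
      * Cache the panel's preferred mode as the encoder's native mode. The
      * probed list is expected to be sorted preferred-first by the caller,
      * so only its first entry is inspected.
      */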
8031 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8032 {
8033         struct drm_encoder *encoder;
8034         struct amdgpu_encoder *amdgpu_encoder;
8035
8036         encoder = amdgpu_dm_connector_to_encoder(connector);
8037
8038         if (encoder == NULL)
8039                 return;
8040
8041         amdgpu_encoder = to_amdgpu_encoder(encoder);
8042
8043         amdgpu_encoder->native_mode.clock = 0;
8044
8045         if (!list_empty(&connector->probed_modes)) {
8046                 struct drm_display_mode *preferred_mode = NULL;
8047
8048                 list_for_each_entry(preferred_mode,
8049                                     &connector->probed_modes,
8050                                     head) {
8051                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8052                                 amdgpu_encoder->native_mode = *preferred_mode;
8053
8054                         break;
8055                 }
8056
8057         }
8058 }
8059
8060 static struct drm_display_mode *
8061 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8062                              char *name,
8063                              int hdisplay, int vdisplay)
8064 {
8065         struct drm_device *dev = encoder->dev;
8066         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8067         struct drm_display_mode *mode = NULL;
8068         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8069
8070         mode = drm_mode_duplicate(dev, native_mode);
8071
8072         if (mode == NULL)
8073                 return NULL;
8074
8075         mode->hdisplay = hdisplay;
8076         mode->vdisplay = vdisplay;
8077         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8078         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8079
8080         return mode;
8081
8082 }
8083
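     /*
      * Add a set of well-known modes below the native resolution so that
      * userspace can pick a smaller mode and have the hardware scale it up.
      * Modes already present in the probed list are skipped.
      */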
8084 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8085                                                  struct drm_connector *connector)
8086 {
8087         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8088         struct drm_display_mode *mode = NULL;
8089         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8090         struct amdgpu_dm_connector *amdgpu_dm_connector =
8091                                 to_amdgpu_dm_connector(connector);
8092         int i;
8093         int n;
8094         struct mode_size {
8095                 char name[DRM_DISPLAY_MODE_LEN];
8096                 int w;
8097                 int h;
8098         } common_modes[] = {
8099                 {  "640x480",  640,  480},
8100                 {  "800x600",  800,  600},
8101                 { "1024x768", 1024,  768},
8102                 { "1280x720", 1280,  720},
8103                 { "1280x800", 1280,  800},
8104                 {"1280x1024", 1280, 1024},
8105                 { "1440x900", 1440,  900},
8106                 {"1680x1050", 1680, 1050},
8107                 {"1600x1200", 1600, 1200},
8108                 {"1920x1080", 1920, 1080},
8109                 {"1920x1200", 1920, 1200}
8110         };
8111
8112         n = ARRAY_SIZE(common_modes);
8113
8114         for (i = 0; i < n; i++) {
8115                 struct drm_display_mode *curmode = NULL;
8116                 bool mode_existed = false;
8117
8118                 if (common_modes[i].w > native_mode->hdisplay ||
8119                     common_modes[i].h > native_mode->vdisplay ||
8120                    (common_modes[i].w == native_mode->hdisplay &&
8121                     common_modes[i].h == native_mode->vdisplay))
8122                         continue;
8123
8124                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8125                         if (common_modes[i].w == curmode->hdisplay &&
8126                             common_modes[i].h == curmode->vdisplay) {
8127                                 mode_existed = true;
8128                                 break;
8129                         }
8130                 }
8131
8132                 if (mode_existed)
8133                         continue;
8134
8135                 mode = amdgpu_dm_create_common_mode(encoder,
8136                                 common_modes[i].name, common_modes[i].w,
8137                                 common_modes[i].h);
8138                 if (!mode)
8139                         continue;
8140
8141                 drm_mode_probed_add(connector, mode);
8142                 amdgpu_dm_connector->num_modes++;
8143         }
8144 }
8145
8146 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8147 {
8148         struct drm_encoder *encoder;
8149         struct amdgpu_encoder *amdgpu_encoder;
8150         const struct drm_display_mode *native_mode;
8151
8152         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8153             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8154                 return;
8155
8156         encoder = amdgpu_dm_connector_to_encoder(connector);
8157         if (!encoder)
8158                 return;
8159
8160         amdgpu_encoder = to_amdgpu_encoder(encoder);
8161
8162         native_mode = &amdgpu_encoder->native_mode;
8163         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8164                 return;
8165
8166         drm_connector_set_panel_orientation_with_quirk(connector,
8167                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8168                                                        native_mode->hdisplay,
8169                                                        native_mode->vdisplay);
8170 }
8171
8172 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8173                                               struct edid *edid)
8174 {
8175         struct amdgpu_dm_connector *amdgpu_dm_connector =
8176                         to_amdgpu_dm_connector(connector);
8177
8178         if (edid) {
8179                 /* empty probed_modes */
8180                 INIT_LIST_HEAD(&connector->probed_modes);
8181                 amdgpu_dm_connector->num_modes =
8182                                 drm_add_edid_modes(connector, edid);
8183
8184                 /* Sort the probed modes before calling
8185                  * amdgpu_dm_get_native_mode(), since an EDID can have
8186                  * more than one preferred mode. Modes later in the
8187                  * probed list may have a higher, still-preferred
8188                  * resolution: for example, 3840x2160 in the base EDID
8189                  * preferred timing and 4096x2160 as the preferred
8190                  * resolution in a later DID extension block.
8191                  */
8192                 drm_mode_sort(&connector->probed_modes);
8193                 amdgpu_dm_get_native_mode(connector);
8194
8195                 /* Freesync capabilities are reset by calling
8196                  * drm_add_edid_modes() and need to be
8197                  * restored here.
8198                  */
8199                 amdgpu_dm_update_freesync_caps(connector, edid);
8200
8201                 amdgpu_set_panel_orientation(connector);
8202         } else {
8203                 amdgpu_dm_connector->num_modes = 0;
8204         }
8205 }
8206
8207 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8208                               struct drm_display_mode *mode)
8209 {
8210         struct drm_display_mode *m;
8211
8212         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8213                 if (drm_mode_equal(m, mode))
8214                         return true;
8215         }
8216
8217         return false;
8218 }
8219
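     /*
      * Synthesize "freesync video" modes: for every common rate inside the
      * panel's VRR range, duplicate the highest-refresh base mode and
      * stretch its vertical blank so that
      *
      *     refresh = clock * 1000 / (htotal * vtotal)
      *
      * hits the target. For instance (hypothetical numbers), a base mode
      * with clock = 1188000 kHz, htotal = 4400 and vtotal = 2250 refreshes
      * at 120 Hz; retargeting 60 Hz gives
      * vtotal = 1188000 * 10^6 / (60000 * 4400) = 4500.
      */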
8220 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8221 {
8222         const struct drm_display_mode *m;
8223         struct drm_display_mode *new_mode;
8224         uint i;
8225         uint32_t new_modes_count = 0;
8226
8227         /* Standard FPS values
8228          *
8229          * 23.976       - TV/NTSC
8230          * 24           - Cinema
8231          * 25           - TV/PAL
8232          * 29.97        - TV/NTSC
8233          * 30           - TV/NTSC
8234          * 48           - Cinema HFR
8235          * 50           - TV/PAL
8236          * 60           - Commonly used
8237          * 48,72,96,120 - Multiples of 24
8238          */
8239         static const uint32_t common_rates[] = {
8240                 23976, 24000, 25000, 29970, 30000,
8241                 48000, 50000, 60000, 72000, 96000, 120000
8242         };
8243
8244         /*
8245          * Find mode with highest refresh rate with the same resolution
8246          * as the preferred mode. Some monitors report a preferred mode
8247          * with lower resolution than the highest refresh rate supported.
8248          */
8249
8250         m = get_highest_refresh_rate_mode(aconnector, true);
8251         if (!m)
8252                 return 0;
8253
8254         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8255                 uint64_t target_vtotal, target_vtotal_diff;
8256                 uint64_t num, den;
8257
8258                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8259                         continue;
8260
8261                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8262                     common_rates[i] > aconnector->max_vfreq * 1000)
8263                         continue;
8264
8265                 num = (unsigned long long)m->clock * 1000 * 1000;
8266                 den = common_rates[i] * (unsigned long long)m->htotal;
8267                 target_vtotal = div_u64(num, den);
8268                 target_vtotal_diff = target_vtotal - m->vtotal;
8269
8270                 /* Check for illegal modes */
8271                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8272                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8273                     m->vtotal + target_vtotal_diff < m->vsync_end)
8274                         continue;
8275
8276                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8277                 if (!new_mode)
8278                         goto out;
8279
8280                 new_mode->vtotal += (u16)target_vtotal_diff;
8281                 new_mode->vsync_start += (u16)target_vtotal_diff;
8282                 new_mode->vsync_end += (u16)target_vtotal_diff;
8283                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8284                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8285
8286                 if (!is_duplicate_mode(aconnector, new_mode)) {
8287                         drm_mode_probed_add(&aconnector->base, new_mode);
8288                         new_modes_count += 1;
8289                 } else {
8290                         drm_mode_destroy(aconnector->base.dev, new_mode);
                     }
8291         }
8292  out:
8293         return new_modes_count;
8294 }
8295
8296 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8297                                                    struct edid *edid)
8298 {
8299         struct amdgpu_dm_connector *amdgpu_dm_connector =
8300                 to_amdgpu_dm_connector(connector);
8301
8302         if (!edid)
8303                 return;
8304
8305         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8306                 amdgpu_dm_connector->num_modes +=
8307                         add_fs_modes(amdgpu_dm_connector);
8308 }
8309
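     /*
      * .get_modes() hook: with a valid EDID, populate the probed list from
      * it and append the common (scaled) and freesync modes; without one,
      * fall back to the standard modes no larger than 640x480.
      */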
8310 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8311 {
8312         struct amdgpu_dm_connector *amdgpu_dm_connector =
8313                         to_amdgpu_dm_connector(connector);
8314         struct drm_encoder *encoder;
8315         struct edid *edid = amdgpu_dm_connector->edid;
8316
8317         encoder = amdgpu_dm_connector_to_encoder(connector);
8318
8319         if (!drm_edid_is_valid(edid)) {
8320                 amdgpu_dm_connector->num_modes =
8321                                 drm_add_modes_noedid(connector, 640, 480);
8322         } else {
8323                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8324                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8325                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8326         }
8327         amdgpu_dm_fbc_init(connector);
8328
8329         return amdgpu_dm_connector->num_modes;
8330 }
8331
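     /*
      * Common connector setup shared with the MST path: reset the connector
      * state, wire up the dc_link, and attach the scaling, underscan,
      * max-bpc, ABM, HDR-metadata and content-protection properties that
      * apply to the given connector type.
      */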
8332 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8333                                      struct amdgpu_dm_connector *aconnector,
8334                                      int connector_type,
8335                                      struct dc_link *link,
8336                                      int link_index)
8337 {
8338         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8339
8340         /*
8341          * Some of the properties below require access to state, like bpc.
8342          * Allocate some default initial connector state with our reset helper.
8343          */
8344         if (aconnector->base.funcs->reset)
8345                 aconnector->base.funcs->reset(&aconnector->base);
8346
8347         aconnector->connector_id = link_index;
8348         aconnector->dc_link = link;
8349         aconnector->base.interlace_allowed = false;
8350         aconnector->base.doublescan_allowed = false;
8351         aconnector->base.stereo_allowed = false;
8352         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8353         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8354         aconnector->audio_inst = -1;
8355         mutex_init(&aconnector->hpd_lock);
8356
8357         /*
8358          * Configure HPD hot-plug support: connector->polled defaults to 0,
8359          * which means HPD hot plug is not supported.
8360          */
8361         switch (connector_type) {
8362         case DRM_MODE_CONNECTOR_HDMIA:
8363                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8364                 aconnector->base.ycbcr_420_allowed =
8365                         link->link_enc->features.hdmi_ycbcr420_supported;
8366                 break;
8367         case DRM_MODE_CONNECTOR_DisplayPort:
8368                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8369                 link->link_enc = link_enc_cfg_get_link_enc(link);
8370                 ASSERT(link->link_enc);
8371                 if (link->link_enc)
8372                         aconnector->base.ycbcr_420_allowed =
8373                                 link->link_enc->features.dp_ycbcr420_supported;
8374                 break;
8375         case DRM_MODE_CONNECTOR_DVID:
8376                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8377                 break;
8378         default:
8379                 break;
8380         }
8381
8382         drm_object_attach_property(&aconnector->base.base,
8383                                 dm->ddev->mode_config.scaling_mode_property,
8384                                 DRM_MODE_SCALE_NONE);
8385
8386         drm_object_attach_property(&aconnector->base.base,
8387                                 adev->mode_info.underscan_property,
8388                                 UNDERSCAN_OFF);
8389         drm_object_attach_property(&aconnector->base.base,
8390                                 adev->mode_info.underscan_hborder_property,
8391                                 0);
8392         drm_object_attach_property(&aconnector->base.base,
8393                                 adev->mode_info.underscan_vborder_property,
8394                                 0);
8395
8396         if (!aconnector->mst_port)
8397                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8398
8399         /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
8400         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8401         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8402
8403         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8404             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8405                 drm_object_attach_property(&aconnector->base.base,
8406                                 adev->mode_info.abm_level_property, 0);
8407         }
8408
8409         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8410             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8411             connector_type == DRM_MODE_CONNECTOR_eDP) {
8412                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8413
8414                 if (!aconnector->mst_port)
8415                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8416
8417 #ifdef CONFIG_DRM_AMD_DC_HDCP
8418                 if (adev->dm.hdcp_workqueue)
8419                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8420 #endif
8421         }
8422 }
8423
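     /*
      * i2c transfer callback: repackage the i2c_msg array as a single DC
      * i2c_command and submit it over the link's DDC channel. The i2c core
      * expects the number of transferred messages on success.
      */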
8424 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8425                               struct i2c_msg *msgs, int num)
8426 {
8427         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8428         struct ddc_service *ddc_service = i2c->ddc_service;
8429         struct i2c_command cmd;
8430         int i;
8431         int result = -EIO;
8432
8433         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8434
8435         if (!cmd.payloads)
8436                 return result;
8437
8438         cmd.number_of_payloads = num;
8439         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8440         cmd.speed = 100;
8441
8442         for (i = 0; i < num; i++) {
8443                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8444                 cmd.payloads[i].address = msgs[i].addr;
8445                 cmd.payloads[i].length = msgs[i].len;
8446                 cmd.payloads[i].data = msgs[i].buf;
8447         }
8448
8449         if (dc_submit_i2c(
8450                         ddc_service->ctx->dc,
8451                         ddc_service->ddc_pin->hw_info.ddc_channel,
8452                         &cmd))
8453                 result = num;
8454
8455         kfree(cmd.payloads);
8456         return result;
8457 }
8458
8459 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8460 {
8461         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8462 }
8463
8464 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8465         .master_xfer = amdgpu_dm_i2c_xfer,
8466         .functionality = amdgpu_dm_i2c_func,
8467 };
8468
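     /*
      * Allocate an i2c adapter wrapping the DC DDC service of one link; the
      * caller is still responsible for registering it with i2c_add_adapter().
      */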
8469 static struct amdgpu_i2c_adapter *
8470 create_i2c(struct ddc_service *ddc_service,
8471            int link_index,
8472            int *res)
8473 {
8474         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8475         struct amdgpu_i2c_adapter *i2c;
8476
8477         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8478         if (!i2c)
8479                 return NULL;
8480         i2c->base.owner = THIS_MODULE;
8481         i2c->base.class = I2C_CLASS_DDC;
8482         i2c->base.dev.parent = &adev->pdev->dev;
8483         i2c->base.algo = &amdgpu_dm_i2c_algo;
8484         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8485         i2c_set_adapdata(&i2c->base, i2c);
8486         i2c->ddc_service = ddc_service;
8487         if (i2c->ddc_service->ddc_pin)
8488                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8489
8490         return i2c;
8491 }
8492
8493
8494 /*
8495  * Note: this function assumes that dc_link_detect() was called for the
8496  * dc_link which will be represented by this aconnector.
8497  */
8498 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8499                                     struct amdgpu_dm_connector *aconnector,
8500                                     uint32_t link_index,
8501                                     struct amdgpu_encoder *aencoder)
8502 {
8503         int res = 0;
8504         int connector_type;
8505         struct dc *dc = dm->dc;
8506         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8507         struct amdgpu_i2c_adapter *i2c;
8508
8509         link->priv = aconnector;
8510
8511         DRM_DEBUG_DRIVER("%s()\n", __func__);
8512
8513         i2c = create_i2c(link->ddc, link->link_index, &res);
8514         if (!i2c) {
8515                 DRM_ERROR("Failed to create i2c adapter data\n");
8516                 return -ENOMEM;
8517         }
8518
8519         aconnector->i2c = i2c;
8520         res = i2c_add_adapter(&i2c->base);
8521
8522         if (res) {
8523                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8524                 goto out_free;
8525         }
8526
8527         connector_type = to_drm_connector_type(link->connector_signal);
8528
8529         res = drm_connector_init_with_ddc(
8530                         dm->ddev,
8531                         &aconnector->base,
8532                         &amdgpu_dm_connector_funcs,
8533                         connector_type,
8534                         &i2c->base);
8535
8536         if (res) {
8537                 DRM_ERROR("connector_init failed\n");
8538                 aconnector->connector_id = -1;
8539                 goto out_free;
8540         }
8541
8542         drm_connector_helper_add(
8543                         &aconnector->base,
8544                         &amdgpu_dm_connector_helper_funcs);
8545
8546         amdgpu_dm_connector_init_helper(
8547                 dm,
8548                 aconnector,
8549                 connector_type,
8550                 link,
8551                 link_index);
8552
8553         drm_connector_attach_encoder(
8554                 &aconnector->base, &aencoder->base);
8555
8556         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8557                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8558                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8559
8560 out_free:
8561         if (res) {
8562                 kfree(i2c);
8563                 aconnector->i2c = NULL;
8564         }
8565         return res;
8566 }
8567
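     /*
      * Return a bitmask with one bit set per available CRTC, effectively
      * (1 << num_crtc) - 1 clamped to the six CRTCs the hardware supports.
      */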
8568 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8569 {
8570         switch (adev->mode_info.num_crtc) {
8571         case 1:
8572                 return 0x1;
8573         case 2:
8574                 return 0x3;
8575         case 3:
8576                 return 0x7;
8577         case 4:
8578                 return 0xf;
8579         case 5:
8580                 return 0x1f;
8581         case 6:
8582         default:
8583                 return 0x3f;
8584         }
8585 }
8586
8587 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8588                                   struct amdgpu_encoder *aencoder,
8589                                   uint32_t link_index)
8590 {
8591         struct amdgpu_device *adev = drm_to_adev(dev);
8592
8593         int res = drm_encoder_init(dev,
8594                                    &aencoder->base,
8595                                    &amdgpu_dm_encoder_funcs,
8596                                    DRM_MODE_ENCODER_TMDS,
8597                                    NULL);
8598
8599         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8600
8601         if (!res)
8602                 aencoder->encoder_id = link_index;
8603         else
8604                 aencoder->encoder_id = -1;
8605
8606         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8607
8608         return res;
8609 }
8610
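     /*
      * Enable or disable the per-CRTC interrupts (pageflip, plus vline0 for
      * secure display) together with the DRM vblank machinery, in symmetric
      * order for the on and off paths.
      */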
8611 static void manage_dm_interrupts(struct amdgpu_device *adev,
8612                                  struct amdgpu_crtc *acrtc,
8613                                  bool enable)
8614 {
8615         /*
8616          * We have no guarantee that the frontend index maps to the same
8617          * backend index - some even map to more than one.
8618          *
8619          * TODO: Use a different interrupt or check DC itself for the mapping.
8620          */
8621         int irq_type =
8622                 amdgpu_display_crtc_idx_to_irq_type(
8623                         adev,
8624                         acrtc->crtc_id);
8625
8626         if (enable) {
8627                 drm_crtc_vblank_on(&acrtc->base);
8628                 amdgpu_irq_get(
8629                         adev,
8630                         &adev->pageflip_irq,
8631                         irq_type);
8632 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8633                 amdgpu_irq_get(
8634                         adev,
8635                         &adev->vline0_irq,
8636                         irq_type);
8637 #endif
8638         } else {
8639 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8640                 amdgpu_irq_put(
8641                         adev,
8642                         &adev->vline0_irq,
8643                         irq_type);
8644 #endif
8645                 amdgpu_irq_put(
8646                         adev,
8647                         &adev->pageflip_irq,
8648                         irq_type);
8649                 drm_crtc_vblank_off(&acrtc->base);
8650         }
8651 }
8652
8653 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8654                                       struct amdgpu_crtc *acrtc)
8655 {
8656         int irq_type =
8657                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8658
8659         /*
8660          * This reads the current state for the IRQ and forcibly reapplies
8661          * the setting to hardware.
8662          */
8663         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8664 }
8665
8666 static bool
8667 is_scaling_state_different(const struct dm_connector_state *dm_state,
8668                            const struct dm_connector_state *old_dm_state)
8669 {
8670         if (dm_state->scaling != old_dm_state->scaling)
8671                 return true;
8672         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8673                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8674                         return true;
8675         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8676                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8677                         return true;
8678         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8679                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8680                 return true;
8681         return false;
8682 }
8683
8684 #ifdef CONFIG_DRM_AMD_DC_HDCP
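     /*
      * Decide whether HDCP needs to be (re)enabled for this connector by
      * normalizing the content-protection property transitions enumerated
      * in the comments below.
      */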
8685 static bool is_content_protection_different(struct drm_connector_state *state,
8686                                             const struct drm_connector_state *old_state,
8687                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8688 {
8689         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8690         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8691
8692         /* Handle: Type0/1 change */
8693         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8694             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8695                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8696                 return true;
8697         }
8698
8699         /* CP is being re-enabled, ignore this
8700          *
8701          * Handles:     ENABLED -> DESIRED
8702          */
8703         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8704             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8705                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8706                 return false;
8707         }
8708
8709         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8710          *
8711          * Handles:     UNDESIRED -> ENABLED
8712          */
8713         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8714             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8715                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8716
8717         /* Stream removed and re-enabled
8718          *
8719          * Can sometimes overlap with the HPD case,
8720          * thus set update_hdcp to false to avoid
8721          * setting HDCP multiple times.
8722          *
8723          * Handles:     DESIRED -> DESIRED (Special case)
8724          */
8725         if (!(old_state->crtc && old_state->crtc->enabled) &&
8726                 state->crtc && state->crtc->enabled &&
8727                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8728                 dm_con_state->update_hdcp = false;
8729                 return true;
8730         }
8731
8732         /* Hot-plug, headless s3, dpms
8733          *
8734          * Only start HDCP if the display is connected/enabled.
8735          * update_hdcp flag will be set to false until the next
8736          * HPD comes in.
8737          *
8738          * Handles:     DESIRED -> DESIRED (Special case)
8739          */
8740         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8741             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8742                 dm_con_state->update_hdcp = false;
8743                 return true;
8744         }
8745
8746         /*
8747          * Handles:     UNDESIRED -> UNDESIRED
8748          *              DESIRED -> DESIRED
8749          *              ENABLED -> ENABLED
8750          */
8751         if (old_state->content_protection == state->content_protection)
8752                 return false;
8753
8754         /*
8755          * Handles:     UNDESIRED -> DESIRED
8756          *              DESIRED -> UNDESIRED
8757          *              ENABLED -> UNDESIRED
8758          */
8759         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8760                 return true;
8761
8762         /*
8763          * Handles:     DESIRED -> ENABLED
8764          */
8765         return false;
8766 }
8767
8768 #endif
8769 static void remove_stream(struct amdgpu_device *adev,
8770                           struct amdgpu_crtc *acrtc,
8771                           struct dc_stream_state *stream)
8772 {
8773         /* This is the update-mode case: mark the CRTC's OTG as unassigned and disabled. */
8774
8775         acrtc->otg_inst = -1;
8776         acrtc->enabled = false;
8777 }
8778
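     /*
      * Translate the cursor plane state into a DC cursor position. A cursor
      * hanging off the top/left edge is clamped to 0 and the overhang is
      * expressed through the hotspot instead.
      */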
8779 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8780                                struct dc_cursor_position *position)
8781 {
8782         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8783         int x, y;
8784         int xorigin = 0, yorigin = 0;
8785
8786         if (!crtc || !plane->state->fb)
8787                 return 0;
8788
8789         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8790             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8791                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8792                           __func__,
8793                           plane->state->crtc_w,
8794                           plane->state->crtc_h);
8795                 return -EINVAL;
8796         }
8797
8798         x = plane->state->crtc_x;
8799         y = plane->state->crtc_y;
8800
8801         if (x <= -amdgpu_crtc->max_cursor_width ||
8802             y <= -amdgpu_crtc->max_cursor_height)
8803                 return 0;
8804
8805         if (x < 0) {
8806                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8807                 x = 0;
8808         }
8809         if (y < 0) {
8810                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8811                 y = 0;
8812         }
8813         position->enable = true;
8814         position->translate_by_source = true;
8815         position->x = x;
8816         position->y = y;
8817         position->x_hotspot = xorigin;
8818         position->y_hotspot = yorigin;
8819
8820         return 0;
8821 }
8822
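     /*
      * Program (or disable) the hardware cursor for the CRTC the plane is
      * attached to, under the DC lock; the attributes are rebuilt from the
      * new framebuffer on every update.
      */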
8823 static void handle_cursor_update(struct drm_plane *plane,
8824                                  struct drm_plane_state *old_plane_state)
8825 {
8826         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8827         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8828         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8829         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8830         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8831         uint64_t address = afb ? afb->address : 0;
8832         struct dc_cursor_position position = {0};
8833         struct dc_cursor_attributes attributes;
8834         int ret;
8835
8836         if (!plane->state->fb && !old_plane_state->fb)
8837                 return;
8838
8839         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8840                       __func__,
8841                       amdgpu_crtc->crtc_id,
8842                       plane->state->crtc_w,
8843                       plane->state->crtc_h);
8844
8845         ret = get_cursor_position(plane, crtc, &position);
8846         if (ret)
8847                 return;
8848
8849         if (!position.enable) {
8850                 /* turn off cursor */
8851                 if (crtc_state && crtc_state->stream) {
8852                         mutex_lock(&adev->dm.dc_lock);
8853                         dc_stream_set_cursor_position(crtc_state->stream,
8854                                                       &position);
8855                         mutex_unlock(&adev->dm.dc_lock);
8856                 }
8857                 return;
8858         }
8859
8860         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8861         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8862
8863         memset(&attributes, 0, sizeof(attributes));
8864         attributes.address.high_part = upper_32_bits(address);
8865         attributes.address.low_part  = lower_32_bits(address);
8866         attributes.width             = plane->state->crtc_w;
8867         attributes.height            = plane->state->crtc_h;
8868         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8869         attributes.rotation_angle    = 0;
8870         attributes.attribute_flags.value = 0;
8871
8872         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8873
8874         if (crtc_state->stream) {
8875                 mutex_lock(&adev->dm.dc_lock);
8876                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8877                                                          &attributes))
8878                         DRM_ERROR("DC failed to set cursor attributes\n");
8879
8880                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8881                                                    &position))
8882                         DRM_ERROR("DC failed to set cursor position\n");
8883                 mutex_unlock(&adev->dm.dc_lock);
8884         }
8885 }
8886
8887 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8888 {
8890         assert_spin_locked(&acrtc->base.dev->event_lock);
8891         WARN_ON(acrtc->event);
8892
8893         acrtc->event = acrtc->base.state->event;
8894
8895         /* Set the flip status */
8896         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8897
8898         /* Mark this event as consumed */
8899         acrtc->base.state->event = NULL;
8900
8901         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8902                      acrtc->crtc_id);
8903 }
8904
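     /*
      * Re-evaluate VRR state around a flip: let the freesync module handle
      * the pre-flip update (including a vmin/vmax adjustment on pre-AI
      * parts), rebuild the VRR infopacket, and mirror the result into both
      * the stream and the IRQ-handler copy of the parameters, under the
      * event lock.
      */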
8905 static void update_freesync_state_on_stream(
8906         struct amdgpu_display_manager *dm,
8907         struct dm_crtc_state *new_crtc_state,
8908         struct dc_stream_state *new_stream,
8909         struct dc_plane_state *surface,
8910         u32 flip_timestamp_in_us)
8911 {
8912         struct mod_vrr_params vrr_params;
8913         struct dc_info_packet vrr_infopacket = {0};
8914         struct amdgpu_device *adev = dm->adev;
8915         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8916         unsigned long flags;
8917         bool pack_sdp_v1_3 = false;
8918
8919         if (!new_stream)
8920                 return;
8921
8922         /*
8923          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8924          * For now it's sufficient to just guard against these conditions.
8925          */
8926
8927         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8928                 return;
8929
8930         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8931         vrr_params = acrtc->dm_irq_params.vrr_params;
8932
8933         if (surface) {
8934                 mod_freesync_handle_preflip(
8935                         dm->freesync_module,
8936                         surface,
8937                         new_stream,
8938                         flip_timestamp_in_us,
8939                         &vrr_params);
8940
8941                 if (adev->family < AMDGPU_FAMILY_AI &&
8942                     amdgpu_dm_vrr_active(new_crtc_state)) {
8943                         mod_freesync_handle_v_update(dm->freesync_module,
8944                                                      new_stream, &vrr_params);
8945
8946                         /* Need to call this before the frame ends. */
8947                         dc_stream_adjust_vmin_vmax(dm->dc,
8948                                                    new_crtc_state->stream,
8949                                                    &vrr_params.adjust);
8950                 }
8951         }
8952
8953         mod_freesync_build_vrr_infopacket(
8954                 dm->freesync_module,
8955                 new_stream,
8956                 &vrr_params,
8957                 PACKET_TYPE_VRR,
8958                 TRANSFER_FUNC_UNKNOWN,
8959                 &vrr_infopacket,
8960                 pack_sdp_v1_3);
8961
8962         new_crtc_state->freesync_timing_changed |=
8963                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8964                         &vrr_params.adjust,
8965                         sizeof(vrr_params.adjust)) != 0);
8966
8967         new_crtc_state->freesync_vrr_info_changed |=
8968                 (memcmp(&new_crtc_state->vrr_infopacket,
8969                         &vrr_infopacket,
8970                         sizeof(vrr_infopacket)) != 0);
8971
8972         acrtc->dm_irq_params.vrr_params = vrr_params;
8973         new_crtc_state->vrr_infopacket = vrr_infopacket;
8974
8975         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8976         new_stream->vrr_infopacket = vrr_infopacket;
8977
8978         if (new_crtc_state->freesync_vrr_info_changed)
8979                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8980                               new_crtc_state->base.crtc->base.id,
8981                               (int)new_crtc_state->base.vrr_enabled,
8982                               (int)vrr_params.state);
8983
8984         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8985 }
8986
8987 static void update_stream_irq_parameters(
8988         struct amdgpu_display_manager *dm,
8989         struct dm_crtc_state *new_crtc_state)
8990 {
8991         struct dc_stream_state *new_stream = new_crtc_state->stream;
8992         struct mod_vrr_params vrr_params;
8993         struct mod_freesync_config config = new_crtc_state->freesync_config;
8994         struct amdgpu_device *adev = dm->adev;
8995         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8996         unsigned long flags;
8997
8998         if (!new_stream)
8999                 return;
9000
9001         /*
9002          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9003          * For now it's sufficient to just guard against these conditions.
9004          */
9005         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9006                 return;
9007
9008         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9009         vrr_params = acrtc->dm_irq_params.vrr_params;
9010
9011         if (new_crtc_state->vrr_supported &&
9012             config.min_refresh_in_uhz &&
9013             config.max_refresh_in_uhz) {
9014                 /*
9015                  * if freesync compatible mode was set, config.state will be set
9016                  * in atomic check
9017                  */
9018                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9019                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9020                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9021                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9022                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9023                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9024                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9025                 } else {
9026                         config.state = new_crtc_state->base.vrr_enabled ?
9027                                                      VRR_STATE_ACTIVE_VARIABLE :
9028                                                      VRR_STATE_INACTIVE;
9029                 }
9030         } else {
9031                 config.state = VRR_STATE_UNSUPPORTED;
9032         }
9033
9034         mod_freesync_build_vrr_params(dm->freesync_module,
9035                                       new_stream,
9036                                       &config, &vrr_params);
9037
9038         new_crtc_state->freesync_timing_changed |=
9039                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9040                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9041
9042         new_crtc_state->freesync_config = config;
9043         /* Copy state for access from DM IRQ handler */
9044         acrtc->dm_irq_params.freesync_config = config;
9045         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9046         acrtc->dm_irq_params.vrr_params = vrr_params;
9047         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9048 }
9049
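     /*
      * Hold a vblank reference (and the vupdate irq) for as long as VRR is
      * active so vblank can never be disabled inside the front porch, and
      * drop both again on the active -> inactive transition.
      */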
9050 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9051                                             struct dm_crtc_state *new_state)
9052 {
9053         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9054         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9055
9056         if (!old_vrr_active && new_vrr_active) {
9057                 /* Transition VRR inactive -> active:
9058                  * While VRR is active, we must not disable vblank irq, as a
9059                  * reenable after disable would compute bogus vblank/pflip
9060                  * timestamps if it likely happened inside display front-porch.
9061                  *
9062                  * We also need vupdate irq for the actual core vblank handling
9063                  * at end of vblank.
9064                  */
9065                 dm_set_vupdate_irq(new_state->base.crtc, true);
9066                 drm_crtc_vblank_get(new_state->base.crtc);
9067                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9068                                  __func__, new_state->base.crtc->base.id);
9069         } else if (old_vrr_active && !new_vrr_active) {
9070                 /* Transition VRR active -> inactive:
9071                  * Allow vblank irq disable again for fixed refresh rate.
9072                  */
9073                 dm_set_vupdate_irq(new_state->base.crtc, false);
9074                 drm_crtc_vblank_put(new_state->base.crtc);
9075                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9076                                  __func__, new_state->base.crtc->base.id);
9077         }
9078 }
9079
9080 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9081 {
9082         struct drm_plane *plane;
9083         struct drm_plane_state *old_plane_state;
9084         int i;
9085
9086         /*
9087          * TODO: Make this per-stream so we don't issue redundant updates for
9088          * commits with multiple streams.
9089          */
9090         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9091                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9092                         handle_cursor_update(plane, old_plane_state);
9093 }
9094
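     /*
      * Core per-CRTC commit path: build a dc_surface_update bundle for every
      * changed plane, wait on the framebuffers' fences, throttle against the
      * target vblank, arm the pageflip event and finally hand the bundle to
      * DC.
      */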
9095 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9096                                     struct dc_state *dc_state,
9097                                     struct drm_device *dev,
9098                                     struct amdgpu_display_manager *dm,
9099                                     struct drm_crtc *pcrtc,
9100                                     bool wait_for_vblank)
9101 {
9102         uint32_t i;
9103         uint64_t timestamp_ns;
9104         struct drm_plane *plane;
9105         struct drm_plane_state *old_plane_state, *new_plane_state;
9106         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9107         struct drm_crtc_state *new_pcrtc_state =
9108                         drm_atomic_get_new_crtc_state(state, pcrtc);
9109         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9110         struct dm_crtc_state *dm_old_crtc_state =
9111                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9112         int planes_count = 0, vpos, hpos;
9113         long r;
9114         unsigned long flags;
9115         struct amdgpu_bo *abo;
9116         uint32_t target_vblank, last_flip_vblank;
9117         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9118         bool pflip_present = false;
9119         struct {
9120                 struct dc_surface_update surface_updates[MAX_SURFACES];
9121                 struct dc_plane_info plane_infos[MAX_SURFACES];
9122                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9123                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9124                 struct dc_stream_update stream_update;
9125         } *bundle;
9126
9127         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9128
9129         if (!bundle) {
9130                 dm_error("Failed to allocate update bundle\n");
9131                 goto cleanup;
9132         }
9133
9134         /*
9135          * Disable the cursor first if we're disabling all the planes.
9136          * It'll remain on the screen after the planes are re-enabled
9137          * if we don't.
9138          */
9139         if (acrtc_state->active_planes == 0)
9140                 amdgpu_dm_commit_cursors(state);
9141
9142         /* update planes when needed */
9143         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9144                 struct drm_crtc *crtc = new_plane_state->crtc;
9145                 struct drm_crtc_state *new_crtc_state;
9146                 struct drm_framebuffer *fb = new_plane_state->fb;
9147                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9148                 bool plane_needs_flip;
9149                 struct dc_plane_state *dc_plane;
9150                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9151
9152                 /* Cursor plane is handled after stream updates */
9153                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9154                         continue;
9155
9156                 if (!fb || !crtc || pcrtc != crtc)
9157                         continue;
9158
9159                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9160                 if (!new_crtc_state->active)
9161                         continue;
9162
9163                 dc_plane = dm_new_plane_state->dc_state;
9164
9165                 bundle->surface_updates[planes_count].surface = dc_plane;
9166                 if (new_pcrtc_state->color_mgmt_changed) {
9167                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9168                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9169                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9170                 }
9171
9172                 fill_dc_scaling_info(dm->adev, new_plane_state,
9173                                      &bundle->scaling_infos[planes_count]);
9174
9175                 bundle->surface_updates[planes_count].scaling_info =
9176                         &bundle->scaling_infos[planes_count];
9177
9178                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9179
9180                 pflip_present = pflip_present || plane_needs_flip;
9181
9182                 if (!plane_needs_flip) {
9183                         planes_count += 1;
9184                         continue;
9185                 }
9186
9187                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9188
9189                 /*
9190                  * Wait for all fences on this FB. Do limited wait to avoid
9191                  * deadlock during GPU reset when this fence will not signal
9192                  * but we hold reservation lock for the BO.
9193                  */
9194                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9195                                           msecs_to_jiffies(5000));
9196                 if (unlikely(r <= 0))
9197                         DRM_ERROR("Waiting for fences timed out!\n");
9198
9199                 fill_dc_plane_info_and_addr(
9200                         dm->adev, new_plane_state,
9201                         afb->tiling_flags,
9202                         &bundle->plane_infos[planes_count],
9203                         &bundle->flip_addrs[planes_count].address,
9204                         afb->tmz_surface, false);
9205
9206                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9207                                  new_plane_state->plane->index,
9208                                  bundle->plane_infos[planes_count].dcc.enable);
9209
9210                 bundle->surface_updates[planes_count].plane_info =
9211                         &bundle->plane_infos[planes_count];
9212
9213                 /*
9214                  * Only allow immediate flips for fast updates that don't
9215                  * change FB pitch, DCC state, rotation or mirroring.
9216                  */
9217                 bundle->flip_addrs[planes_count].flip_immediate =
9218                         crtc->state->async_flip &&
9219                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9220
9221                 timestamp_ns = ktime_get_ns();
9222                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9223                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9224                 bundle->surface_updates[planes_count].surface = dc_plane;
9225
9226                 if (!bundle->surface_updates[planes_count].surface) {
9227                         DRM_ERROR("No surface for CRTC: id=%d\n",
9228                                         acrtc_attach->crtc_id);
9229                         continue;
9230                 }
9231
9232                 if (plane == pcrtc->primary)
9233                         update_freesync_state_on_stream(
9234                                 dm,
9235                                 acrtc_state,
9236                                 acrtc_state->stream,
9237                                 dc_plane,
9238                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9239
9240                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9241                                  __func__,
9242                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9243                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9244
9245                 planes_count += 1;
9246
9247         }
9248
9249         if (pflip_present) {
9250                 if (!vrr_active) {
9251                         /* Use old throttling in non-vrr fixed refresh rate mode
9252                          * to keep flip scheduling based on target vblank counts
9253                          * working in a backwards compatible way, e.g., for
9254                          * clients using the GLX_OML_sync_control extension or
9255                          * DRI3/Present extension with defined target_msc.
9256                          */
9257                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9258                 } else {
9260                         /* For variable refresh rate mode only:
9261                          * Get vblank of last completed flip to avoid > 1 vrr
9262                          * flips per video frame by use of throttling, but allow
9263                          * flip programming anywhere in the possibly large
9264                          * variable vrr vblank interval for fine-grained flip
9265                          * timing control and more opportunity to avoid stutter
9266                          * on late submission of flips.
9267                          */
9268                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9269                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9270                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9271                 }
9272
9273                 target_vblank = last_flip_vblank + wait_for_vblank;
9274
9275                 /*
9276                  * Wait until we're out of the vertical blank period before the one
9277                  * targeted by the flip
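                      *
                      * This is a simple poll: while the CRTC reports a valid
                      * scanout position inside vblank and the target vblank
                      * count has not yet been reached, sleep for roughly a
                      * millisecond and check again.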
9278                  */
9279                 while ((acrtc_attach->enabled &&
9280                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9281                                                             0, &vpos, &hpos, NULL,
9282                                                             NULL, &pcrtc->hwmode)
9283                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9284                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9285                         (int)(target_vblank -
9286                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9287                         usleep_range(1000, 1100);
9288                 }
9289
9290                 /**
9291                  * Prepare the flip event for the pageflip interrupt to handle.
9292                  *
9293                  * This only works in the case where we've already turned on the
9294                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
9295                  * from 0 -> n planes we have to skip a hardware generated event
9296                  * and rely on sending it from software.
9297                  */
9298                 if (acrtc_attach->base.state->event &&
9299                     acrtc_state->active_planes > 0 &&
9300                     !acrtc_state->force_dpms_off) {
9301                         drm_crtc_vblank_get(pcrtc);
9302
9303                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9304
9305                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9306                         prepare_flip_isr(acrtc_attach);
9307
9308                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9309                 }
9310
9311                 if (acrtc_state->stream) {
9312                         if (acrtc_state->freesync_vrr_info_changed)
9313                                 bundle->stream_update.vrr_infopacket =
9314                                         &acrtc_state->stream->vrr_infopacket;
9315                 }
9316         }
9317
9318         /* Update the planes if changed or disable if we don't have any. */
9319         if ((planes_count || acrtc_state->active_planes == 0) &&
9320                 acrtc_state->stream) {
9321 #if defined(CONFIG_DRM_AMD_DC_DCN)
9322                 /*
9323                  * If PSR or idle optimizations are enabled then flush out
9324                  * any pending work before hardware programming.
9325                  */
9326                 if (dm->vblank_control_workqueue)
9327                         flush_workqueue(dm->vblank_control_workqueue);
9328 #endif
9329
9330                 bundle->stream_update.stream = acrtc_state->stream;
9331                 if (new_pcrtc_state->mode_changed) {
9332                         bundle->stream_update.src = acrtc_state->stream->src;
9333                         bundle->stream_update.dst = acrtc_state->stream->dst;
9334                 }
9335
9336                 if (new_pcrtc_state->color_mgmt_changed) {
9337                         /*
9338                          * TODO: This isn't fully correct since we've actually
9339                          * already modified the stream in place.
9340                          */
9341                         bundle->stream_update.gamut_remap =
9342                                 &acrtc_state->stream->gamut_remap_matrix;
9343                         bundle->stream_update.output_csc_transform =
9344                                 &acrtc_state->stream->csc_color_matrix;
9345                         bundle->stream_update.out_transfer_func =
9346                                 acrtc_state->stream->out_transfer_func;
9347                 }
9348
9349                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9350                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9351                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9352
9353                 /*
9354                  * If FreeSync state on the stream has changed then we need to
9355                  * re-adjust the min/max bounds now that DC doesn't handle this
9356                  * as part of commit.
9357                  */
9358                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9359                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9360                         dc_stream_adjust_vmin_vmax(
9361                                 dm->dc, acrtc_state->stream,
9362                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9363                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9364                 }
9365                 mutex_lock(&dm->dc_lock);
9366                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9367                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9368                         amdgpu_dm_psr_disable(acrtc_state->stream);
9369
9370                 dc_commit_updates_for_stream(dm->dc,
9371                                                      bundle->surface_updates,
9372                                                      planes_count,
9373                                                      acrtc_state->stream,
9374                                                      &bundle->stream_update,
9375                                                      dc_state);
9376
9377                 /**
9378                  * Enable or disable the interrupts on the backend.
9379                  *
9380                  * Most pipes are put into power gating when unused.
9381                  *
9382                  * When power gating is enabled on a pipe we lose the
9383                  * interrupt enablement state when power gating is disabled.
9384                  *
9385                  * So we need to update the IRQ control state in hardware
9386                  * whenever the pipe turns on (since it could be previously
9387                  * power gated) or off (since some pipes can't be power gated
9388                  * on some ASICs).
9389                  */
9390                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9391                         dm_update_pflip_irq_state(drm_to_adev(dev),
9392                                                   acrtc_attach);
9393
9394                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9395                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9396                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9397                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9398
9399                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9400                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9401                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9402                         struct amdgpu_dm_connector *aconn =
9403                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9404
9405                         if (aconn->psr_skip_count > 0)
9406                                 aconn->psr_skip_count--;
9407
9408                         /* Allow PSR when skip count is 0. */
9409                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9410                 } else {
9411                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9412                 }
9413
9414                 mutex_unlock(&dm->dc_lock);
9415         }
9416
9417         /*
9418          * Update cursor state *after* programming all the planes.
9419          * This avoids redundant programming in the case where we're going
9420          * to be disabling a single plane - those pipes are being disabled.
9421          */
9422         if (acrtc_state->active_planes)
9423                 amdgpu_dm_commit_cursors(state);
9424
9425 cleanup:
9426         kfree(bundle);
9427 }
9428
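     /*
      * amdgpu_dm_commit_audio - notify the audio component of stream changes.
      *
      * Two passes over the connector states: connectors whose CRTC went away
      * or changed first have their audio instance invalidated and a removal
      * notification sent; connectors on newly modeset CRTCs then pick up the
      * audio instance from the committed stream status and a notification is
      * sent so the audio driver can refresh the ELD.
      */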
9429 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9430                                    struct drm_atomic_state *state)
9431 {
9432         struct amdgpu_device *adev = drm_to_adev(dev);
9433         struct amdgpu_dm_connector *aconnector;
9434         struct drm_connector *connector;
9435         struct drm_connector_state *old_con_state, *new_con_state;
9436         struct drm_crtc_state *new_crtc_state;
9437         struct dm_crtc_state *new_dm_crtc_state;
9438         const struct dc_stream_status *status;
9439         int i, inst;
9440
9441         /* Notify audio device removals. */
9442         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9443                 if (old_con_state->crtc != new_con_state->crtc) {
9444                         /* CRTC changes require notification. */
9445                         goto notify;
9446                 }
9447
9448                 if (!new_con_state->crtc)
9449                         continue;
9450
9451                 new_crtc_state = drm_atomic_get_new_crtc_state(
9452                         state, new_con_state->crtc);
9453
9454                 if (!new_crtc_state)
9455                         continue;
9456
9457                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9458                         continue;
9459
9460         notify:
9461                 aconnector = to_amdgpu_dm_connector(connector);
9462
9463                 mutex_lock(&adev->dm.audio_lock);
9464                 inst = aconnector->audio_inst;
9465                 aconnector->audio_inst = -1;
9466                 mutex_unlock(&adev->dm.audio_lock);
9467
9468                 amdgpu_dm_audio_eld_notify(adev, inst);
9469         }
9470
9471         /* Notify audio device additions. */
9472         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9473                 if (!new_con_state->crtc)
9474                         continue;
9475
9476                 new_crtc_state = drm_atomic_get_new_crtc_state(
9477                         state, new_con_state->crtc);
9478
9479                 if (!new_crtc_state)
9480                         continue;
9481
9482                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9483                         continue;
9484
9485                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9486                 if (!new_dm_crtc_state->stream)
9487                         continue;
9488
9489                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9490                 if (!status)
9491                         continue;
9492
9493                 aconnector = to_amdgpu_dm_connector(connector);
9494
9495                 mutex_lock(&adev->dm.audio_lock);
9496                 inst = status->audio_inst;
9497                 aconnector->audio_inst = inst;
9498                 mutex_unlock(&adev->dm.audio_lock);
9499
9500                 amdgpu_dm_audio_eld_notify(adev, inst);
9501         }
9502 }
9503
9504 /*
9505  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9506  * @crtc_state: the DRM CRTC state
9507  * @stream_state: the DC stream state.
9508  *
9509  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9510  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9511  */
9512 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9513                                                 struct dc_stream_state *stream_state)
9514 {
9515         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9516 }
9517
9518 /**
9519  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9520  * @state: The atomic state to commit
9521  *
9522  * This will tell DC to commit the constructed DC state from atomic_check,
9523  * programming the hardware. Any failure here implies a hardware failure, since
9524  * atomic check should have filtered anything non-kosher.
9525  */
9526 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9527 {
9528         struct drm_device *dev = state->dev;
9529         struct amdgpu_device *adev = drm_to_adev(dev);
9530         struct amdgpu_display_manager *dm = &adev->dm;
9531         struct dm_atomic_state *dm_state;
9532         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9533         uint32_t i, j;
9534         struct drm_crtc *crtc;
9535         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9536         unsigned long flags;
9537         bool wait_for_vblank = true;
9538         struct drm_connector *connector;
9539         struct drm_connector_state *old_con_state, *new_con_state;
9540         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9541         int crtc_disable_count = 0;
9542         bool mode_set_reset_required = false;
9543
9544         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9545
9546         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9547
9548         dm_state = dm_atomic_get_new_state(state);
9549         if (dm_state && dm_state->context) {
9550                 dc_state = dm_state->context;
9551         } else {
9552                 /* No state changes, retain current state. */
9553                 dc_state_temp = dc_create_state(dm->dc);
9554                 ASSERT(dc_state_temp);
9555                 dc_state = dc_state_temp;
9556                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9557         }
9558
9559         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9560                                        new_crtc_state, i) {
9561                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9562
9563                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9564
9565                 if (old_crtc_state->active &&
9566                     (!new_crtc_state->active ||
9567                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9568                         manage_dm_interrupts(adev, acrtc, false);
9569                         dc_stream_release(dm_old_crtc_state->stream);
9570                 }
9571         }
9572
9573         drm_atomic_helper_calc_timestamping_constants(state);
9574
9575         /* update changed items */
9576         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9577                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9578
9579                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9580                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9581
9582                 DRM_DEBUG_ATOMIC(
9583                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9584                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9585                         "connectors_changed:%d\n",
9586                         acrtc->crtc_id,
9587                         new_crtc_state->enable,
9588                         new_crtc_state->active,
9589                         new_crtc_state->planes_changed,
9590                         new_crtc_state->mode_changed,
9591                         new_crtc_state->active_changed,
9592                         new_crtc_state->connectors_changed);
9593
9594                 /* Disable cursor if disabling crtc */
9595                 if (old_crtc_state->active && !new_crtc_state->active) {
9596                         struct dc_cursor_position position;
9597
9598                         memset(&position, 0, sizeof(position));
9599                         mutex_lock(&dm->dc_lock);
9600                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9601                         mutex_unlock(&dm->dc_lock);
9602                 }
9603
9604                 /* Copy all transient state flags into dc state */
9605                 if (dm_new_crtc_state->stream) {
9606                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9607                                                             dm_new_crtc_state->stream);
9608                 }
9609
9610                 /* Handle the headless hotplug case, updating new_state and
9611                  * aconnector as needed.
9612                  */
9613
9614                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9615
9616                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9617
9618                         if (!dm_new_crtc_state->stream) {
9619                                 /*
9620                                  * This can happen because of issues with the
9621                                  * delivery of userspace notifications: userspace
9622                                  * tries to set a mode on a display which is in
9623                                  * fact disconnected, so dc_sink is NULL on the
9624                                  * aconnector. We expect a mode reset to come
9625                                  * soon.
9626                                  *
9627                                  * This can also happen when an unplug occurs
9628                                  * during the resume sequence.
9629                                  *
9630                                  * In both cases, we want to pretend we still
9631                                  * have a sink to keep the pipe running, so that
9632                                  * the hw state stays consistent with the sw state.
9633                                  */
9634                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9635                                                 __func__, acrtc->base.base.id);
9636                                 continue;
9637                         }
9638
9639                         if (dm_old_crtc_state->stream)
9640                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9641
9642                         pm_runtime_get_noresume(dev->dev);
9643
9644                         acrtc->enabled = true;
9645                         acrtc->hw_mode = new_crtc_state->mode;
9646                         crtc->hwmode = new_crtc_state->mode;
9647                         mode_set_reset_required = true;
9648                 } else if (modereset_required(new_crtc_state)) {
9649                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9650                         /* i.e. reset mode */
9651                         if (dm_old_crtc_state->stream)
9652                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9653
9654                         mode_set_reset_required = true;
9655                 }
9656         } /* for_each_crtc_in_state() */
9657
9658         if (dc_state) {
9659                 /* If there was a mode set or reset, disable eDP PSR. */
9660                 if (mode_set_reset_required) {
9661 #if defined(CONFIG_DRM_AMD_DC_DCN)
9662                         if (dm->vblank_control_workqueue)
9663                                 flush_workqueue(dm->vblank_control_workqueue);
9664 #endif
9665                         amdgpu_dm_psr_disable_all(dm);
9666                 }
9667
9668                 dm_enable_per_frame_crtc_master_sync(dc_state);
9669                 mutex_lock(&dm->dc_lock);
9670                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9671 #if defined(CONFIG_DRM_AMD_DC_DCN)
9672                 /* Allow idle optimization when vblank count is 0 for display off */
9673                 if (dm->active_vblank_irq_count == 0)
9674                         dc_allow_idle_optimizations(dm->dc, true);
9675 #endif
9676                 mutex_unlock(&dm->dc_lock);
9677         }
9678
9679         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9680                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9681
9682                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9683
9684                 if (dm_new_crtc_state->stream != NULL) {
9685                         const struct dc_stream_status *status =
9686                                         dc_stream_get_status(dm_new_crtc_state->stream);
9687
9688                         if (!status)
9689                                 status = dc_stream_get_status_from_state(dc_state,
9690                                                                          dm_new_crtc_state->stream);
9691                         if (!status)
9692                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9693                         else
9694                                 acrtc->otg_inst = status->primary_otg_inst;
9695                 }
9696         }
9697 #ifdef CONFIG_DRM_AMD_DC_HDCP
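             /*
              * HDCP handling: if a CRTC's stream went away while content
              * protection was ENABLED, reset HDCP on the link and fall back
              * to DESIRED so protection is renegotiated once the display is
              * driven again; otherwise, push any content protection change
              * to the HDCP workqueue.
              */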
9698         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9699                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9700                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9701                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9702
9703                 new_crtc_state = NULL;
9704
9705                 if (acrtc)
9706                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9707
9708                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9709
9710                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9711                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9712                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9713                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9714                         dm_new_con_state->update_hdcp = true;
9715                         continue;
9716                 }
9717
9718                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9719                         hdcp_update_display(
9720                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9721                                 new_con_state->hdcp_content_type,
9722                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9723         }
9724 #endif
9725
9726         /* Handle connector state changes */
9727         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9728                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9729                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9730                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9731                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9732                 struct dc_stream_update stream_update;
9733                 struct dc_info_packet hdr_packet;
9734                 struct dc_stream_status *status = NULL;
9735                 bool abm_changed, hdr_changed, scaling_changed;
9736
9737                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9738                 memset(&stream_update, 0, sizeof(stream_update));
9739
9740                 if (acrtc) {
9741                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9742                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9743                 }
9744
9745                 /* Skip any modesets/resets */
9746                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9747                         continue;
9748
9749                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9750                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9751
9752                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9753                                                              dm_old_con_state);
9754
9755                 abm_changed = dm_new_crtc_state->abm_level !=
9756                               dm_old_crtc_state->abm_level;
9757
9758                 hdr_changed =
9759                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9760
9761                 if (!scaling_changed && !abm_changed && !hdr_changed)
9762                         continue;
9763
9764                 stream_update.stream = dm_new_crtc_state->stream;
9765                 if (scaling_changed) {
9766                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9767                                         dm_new_con_state, dm_new_crtc_state->stream);
9768
9769                         stream_update.src = dm_new_crtc_state->stream->src;
9770                         stream_update.dst = dm_new_crtc_state->stream->dst;
9771                 }
9772
9773                 if (abm_changed) {
9774                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9775
9776                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9777                 }
9778
9779                 if (hdr_changed) {
9780                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9781                         stream_update.hdr_static_metadata = &hdr_packet;
9782                 }
9783
9784                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9785
9786                 if (WARN_ON(!status))
9787                         continue;
9788
9789                 WARN_ON(!status->plane_count);
9790
9791                 /*
9792                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9793                  * Here we create an empty update on each plane.
9794                  * To fix this, DC should permit updating only stream properties.
9795                  */
9796                 for (j = 0; j < status->plane_count; j++)
9797                         dummy_updates[j].surface = status->plane_states[0];
9798
9799
9800                 mutex_lock(&dm->dc_lock);
9801                 dc_commit_updates_for_stream(dm->dc,
9802                                                      dummy_updates,
9803                                                      status->plane_count,
9804                                                      dm_new_crtc_state->stream,
9805                                                      &stream_update,
9806                                                      dc_state);
9807                 mutex_unlock(&dm->dc_lock);
9808         }
9809
9810         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9811         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9812                                       new_crtc_state, i) {
9813                 if (old_crtc_state->active && !new_crtc_state->active)
9814                         crtc_disable_count++;
9815
9816                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9817                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9818
9819                 /* For freesync config update on crtc state and params for irq */
9820                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9821
9822                 /* Handle vrr on->off / off->on transitions */
9823                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9824                                                 dm_new_crtc_state);
9825         }
9826
9827         /**
9828          * Enable interrupts for CRTCs that are newly enabled or went through
9829          * a modeset. This is intentionally deferred until after the front end
9830          * state is modified, so that the OTG is on by the time the IRQ
9831          * handlers run and they don't access stale or invalid state.
9832          */
9833         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9834                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9835 #ifdef CONFIG_DEBUG_FS
9836                 bool configure_crc = false;
9837                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9838 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9839                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9840 #endif
9841                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9842                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9843                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9844 #endif
9845                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9846
9847                 if (new_crtc_state->active &&
9848                     (!old_crtc_state->active ||
9849                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9850                         dc_stream_retain(dm_new_crtc_state->stream);
9851                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9852                         manage_dm_interrupts(adev, acrtc, true);
9853
9854 #ifdef CONFIG_DEBUG_FS
9855                         /**
9856                          * Frontend may have changed so reapply the CRC capture
9857                          * settings for the stream.
9858                          */
9859                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9860
9861                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9862                                 configure_crc = true;
9863 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9864                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9865                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9866                                         acrtc->dm_irq_params.crc_window.update_win = true;
9867                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9868                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9869                                         crc_rd_wrk->crtc = crtc;
9870                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9871                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9872                                 }
9873 #endif
9874                         }
9875
9876                         if (configure_crc)
9877                                 if (amdgpu_dm_crtc_configure_crc_source(
9878                                         crtc, dm_new_crtc_state, cur_crc_src))
9879                                         DRM_DEBUG_DRIVER("Failed to configure crc source");
9880 #endif
9881                 }
9882         }
9883
9884         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9885                 if (new_crtc_state->async_flip)
9886                         wait_for_vblank = false;
9887
9888         /* update planes when needed per crtc */
9889         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9890                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9891
9892                 if (dm_new_crtc_state->stream)
9893                         amdgpu_dm_commit_planes(state, dc_state, dev,
9894                                                 dm, crtc, wait_for_vblank);
9895         }
9896
9897         /* Update audio instances for each connector. */
9898         amdgpu_dm_commit_audio(dev, state);
9899
9900 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9901         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9902         /* restore the backlight level */
9903         for (i = 0; i < dm->num_of_edps; i++) {
9904                 if (dm->backlight_dev[i] &&
9905                     (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9906                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9907         }
9908 #endif
9909         /*
9910          * send vblank event on all events not handled in flip and
9911          * mark consumed event for drm_atomic_helper_commit_hw_done
9912          */
9913         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9914         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9915
9916                 if (new_crtc_state->event)
9917                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9918
9919                 new_crtc_state->event = NULL;
9920         }
9921         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9922
9923         /* Signal HW programming completion */
9924         drm_atomic_helper_commit_hw_done(state);
9925
9926         if (wait_for_vblank)
9927                 drm_atomic_helper_wait_for_flip_done(dev, state);
9928
9929         drm_atomic_helper_cleanup_planes(dev, state);
9930
9931         /* return the stolen vga memory back to VRAM */
9932         if (!adev->mman.keep_stolen_vga_memory)
9933                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9934         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9935
9936         /*
9937          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9938          * so we can put the GPU into runtime suspend if we're not driving any
9939          * displays anymore
9940          */
9941         for (i = 0; i < crtc_disable_count; i++)
9942                 pm_runtime_put_autosuspend(dev->dev);
9943         pm_runtime_mark_last_busy(dev->dev);
9944
9945         if (dc_state_temp)
9946                 dc_release_state(dc_state_temp);
9947 }
9948
9949
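     /*
      * dm_force_atomic_commit - force a modeset on a connector's CRTC.
      *
      * Builds a minimal atomic state containing just the connector, its CRTC
      * and the CRTC's primary plane, marks the CRTC state as mode_changed to
      * force a full modeset, and commits it synchronously.
      */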
9950 static int dm_force_atomic_commit(struct drm_connector *connector)
9951 {
9952         int ret = 0;
9953         struct drm_device *ddev = connector->dev;
9954         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9955         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9956         struct drm_plane *plane = disconnected_acrtc->base.primary;
9957         struct drm_connector_state *conn_state;
9958         struct drm_crtc_state *crtc_state;
9959         struct drm_plane_state *plane_state;
9960
9961         if (!state)
9962                 return -ENOMEM;
9963
9964         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9965
9966         /* Construct an atomic state to restore the previous display settings */
9967
9968         /*
9969          * Attach connectors to drm_atomic_state
9970          */
9971         conn_state = drm_atomic_get_connector_state(state, connector);
9972
9973         ret = PTR_ERR_OR_ZERO(conn_state);
9974         if (ret)
9975                 goto out;
9976
9977         /* Attach crtc to drm_atomic_state*/
9978         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9979
9980         ret = PTR_ERR_OR_ZERO(crtc_state);
9981         if (ret)
9982                 goto out;
9983
9984         /* force a restore */
9985         crtc_state->mode_changed = true;
9986
9987         /* Attach plane to drm_atomic_state */
9988         plane_state = drm_atomic_get_plane_state(state, plane);
9989
9990         ret = PTR_ERR_OR_ZERO(plane_state);
9991         if (ret)
9992                 goto out;
9993
9994         /* Call commit internally with the state we just constructed */
9995         ret = drm_atomic_commit(state);
9996
9997 out:
9998         drm_atomic_state_put(state);
9999         if (ret)
10000                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10001
10002         return ret;
10003 }
10004
10005 /*
10006  * This function handles all cases where a mode set does not come upon hotplug.
10007  * This includes when a display is unplugged and then plugged back into the
10008  * same port, and when running without usermode desktop manager support.
10009  */
10010 void dm_restore_drm_connector_state(struct drm_device *dev,
10011                                     struct drm_connector *connector)
10012 {
10013         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10014         struct amdgpu_crtc *disconnected_acrtc;
10015         struct dm_crtc_state *acrtc_state;
10016
10017         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10018                 return;
10019
10020         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10021         if (!disconnected_acrtc)
10022                 return;
10023
10024         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10025         if (!acrtc_state->stream)
10026                 return;
10027
10028         /*
10029          * If the previous sink is not released and different from the current,
10030          * we deduce we are in a state where we can not rely on usermode call
10031          * to turn on the display, so we do it here
10032          */
10033         if (acrtc_state->stream->sink != aconnector->dc_sink)
10034                 dm_force_atomic_commit(&aconnector->base);
10035 }
10036
10037 /*
10038  * Grabs all modesetting locks to serialize against any blocking commits,
10039  * and waits for completion of all non-blocking commits.
10040  */
10041 static int do_aquire_global_lock(struct drm_device *dev,
10042                                  struct drm_atomic_state *state)
10043 {
10044         struct drm_crtc *crtc;
10045         struct drm_crtc_commit *commit;
10046         long ret;
10047
10048         /*
10049          * Adding all modeset locks to acquire_ctx will
10050          * ensure that when the framework releases it, the
10051          * extra locks we are taking here will also get released.
10052          */
10053         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10054         if (ret)
10055                 return ret;
10056
10057         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10058                 spin_lock(&crtc->commit_lock);
10059                 commit = list_first_entry_or_null(&crtc->commit_list,
10060                                 struct drm_crtc_commit, commit_entry);
10061                 if (commit)
10062                         drm_crtc_commit_get(commit);
10063                 spin_unlock(&crtc->commit_lock);
10064
10065                 if (!commit)
10066                         continue;
10067
10068                 /*
10069                  * Make sure all pending HW programming completed and
10070                  * page flips done
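                       *
                       * (Each wait uses a 10 second timeout; a timeout is only
                       * reported via DRM_ERROR, while an interrupted wait has
                       * its error propagated to the caller.)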
10071                  */
10072                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10073
10074                 if (ret > 0)
10075                         ret = wait_for_completion_interruptible_timeout(
10076                                         &commit->flip_done, 10*HZ);
10077
10078                 if (ret == 0)
10079                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10080                                   "timed out\n", crtc->base.id, crtc->name);
10081
10082                 drm_crtc_commit_put(commit);
10083         }
10084
10085         return ret < 0 ? ret : 0;
10086 }
10087
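      /*
       * get_freesync_config_for_crtc - derive the VRR config for a CRTC.
       *
       * VRR is supported when the connector reports freesync capability and
       * the nominal refresh rate of the current mode falls within the
       * panel's [min_vfreq, max_vfreq] range; the resulting config then
       * selects between the fixed, variable and inactive VRR states.
       */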
10088 static void get_freesync_config_for_crtc(
10089         struct dm_crtc_state *new_crtc_state,
10090         struct dm_connector_state *new_con_state)
10091 {
10092         struct mod_freesync_config config = {0};
10093         struct amdgpu_dm_connector *aconnector =
10094                         to_amdgpu_dm_connector(new_con_state->base.connector);
10095         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10096         int vrefresh = drm_mode_vrefresh(mode);
10097         bool fs_vid_mode = false;
10098
10099         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10100                                         vrefresh >= aconnector->min_vfreq &&
10101                                         vrefresh <= aconnector->max_vfreq;
10102
10103         if (new_crtc_state->vrr_supported) {
10104                 new_crtc_state->stream->ignore_msa_timing_param = true;
10105                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10106
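                       /*
                        * DC expects the refresh range in micro-Hz, so e.g. a
                        * 48 Hz panel minimum becomes 48,000,000 uHz here.
                        */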
10107                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10108                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10109                 config.vsif_supported = true;
10110                 config.btr = true;
10111
10112                 if (fs_vid_mode) {
10113                         config.state = VRR_STATE_ACTIVE_FIXED;
10114                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10115                         goto out;
10116                 } else if (new_crtc_state->base.vrr_enabled) {
10117                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10118                 } else {
10119                         config.state = VRR_STATE_INACTIVE;
10120                 }
10121         }
10122 out:
10123         new_crtc_state->freesync_config = config;
10124 }
10125
10126 static void reset_freesync_config_for_crtc(
10127         struct dm_crtc_state *new_crtc_state)
10128 {
10129         new_crtc_state->vrr_supported = false;
10130
10131         memset(&new_crtc_state->vrr_infopacket, 0,
10132                sizeof(new_crtc_state->vrr_infopacket));
10133 }
10134
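      /*
       * is_timing_unchanged_for_freesync - detect a pure front porch change.
       *
       * Returns true when the pixel clock, horizontal timing and vsync pulse
       * width are all identical and only the vertical total and vsync
       * position differ, i.e. the new mode is the same base timing with a
       * different vertical front porch. Such changes do not require a full
       * modeset.
       */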
10135 static bool
10136 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10137                                  struct drm_crtc_state *new_crtc_state)
10138 {
10139         struct drm_display_mode old_mode, new_mode;
10140
10141         if (!old_crtc_state || !new_crtc_state)
10142                 return false;
10143
10144         old_mode = old_crtc_state->mode;
10145         new_mode = new_crtc_state->mode;
10146
10147         if (old_mode.clock       == new_mode.clock &&
10148             old_mode.hdisplay    == new_mode.hdisplay &&
10149             old_mode.vdisplay    == new_mode.vdisplay &&
10150             old_mode.htotal      == new_mode.htotal &&
10151             old_mode.vtotal      != new_mode.vtotal &&
10152             old_mode.hsync_start == new_mode.hsync_start &&
10153             old_mode.vsync_start != new_mode.vsync_start &&
10154             old_mode.hsync_end   == new_mode.hsync_end &&
10155             old_mode.vsync_end   != new_mode.vsync_end &&
10156             old_mode.hskew       == new_mode.hskew &&
10157             old_mode.vscan       == new_mode.vscan &&
10158             (old_mode.vsync_end - old_mode.vsync_start) ==
10159             (new_mode.vsync_end - new_mode.vsync_start))
10160                 return true;
10161
10162         return false;
10163 }
10164
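      /*
       * The fixed refresh rate is derived from the mode timing, in micro-Hz:
       *
       *   refresh_uhz = (clock_khz * 1000 * 1000000) / (htotal * vtotal)
       *
       * e.g. a standard CEA 1080p60 timing (clock 148500 kHz, htotal 2200,
       * vtotal 1125) gives 148500000 * 1000000 / 2475000 = 60,000,000 uHz,
       * i.e. exactly 60 Hz.
       */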
10165 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
      {
10166         uint64_t num, den, res;
10167         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10168
10169         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10170
10171         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10172         den = (unsigned long long)new_crtc_state->mode.htotal *
10173               (unsigned long long)new_crtc_state->mode.vtotal;
10174
10175         res = div_u64(num, den);
10176         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10177 }
10178
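      /*
       * dm_update_crtc_state - add or remove the dc stream for one CRTC.
       *
       * On disable/reset, the old stream is removed from the dc context; on
       * enable, a new stream is validated against the sink and added. Stream
       * updates that do not require a full modeset (scaling, ABM, color
       * management, freesync config) are applied at the bottom once a
       * stream exists.
       */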
10179 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10180                                 struct drm_atomic_state *state,
10181                                 struct drm_crtc *crtc,
10182                                 struct drm_crtc_state *old_crtc_state,
10183                                 struct drm_crtc_state *new_crtc_state,
10184                                 bool enable,
10185                                 bool *lock_and_validation_needed)
10186 {
10187         struct dm_atomic_state *dm_state = NULL;
10188         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10189         struct dc_stream_state *new_stream;
10190         int ret = 0;
10191
10192         /*
10193          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10194          * update changed items
10195          */
10196         struct amdgpu_crtc *acrtc = NULL;
10197         struct amdgpu_dm_connector *aconnector = NULL;
10198         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10199         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10200
10201         new_stream = NULL;
10202
10203         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10204         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10205         acrtc = to_amdgpu_crtc(crtc);
10206         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10207
10208         /* TODO This hack should go away */
10209         if (aconnector && enable) {
10210                 /* Make sure fake sink is created in plug-in scenario */
10211                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10212                                                             &aconnector->base);
10213                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10214                                                             &aconnector->base);
10215
10216                 if (IS_ERR(drm_new_conn_state)) {
10217                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10218                         goto fail;
10219                 }
10220
10221                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10222                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10223
10224                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10225                         goto skip_modeset;
10226
10227                 new_stream = create_validate_stream_for_sink(aconnector,
10228                                                              &new_crtc_state->mode,
10229                                                              dm_new_conn_state,
10230                                                              dm_old_crtc_state->stream);
10231
10232                 /*
10233                  * We can have no stream on ACTION_SET if a display
10234                  * was disconnected during S3. In this case it is not an
10235                  * error: the OS will be updated after detection and
10236                  * will do the right thing on the next atomic commit.
10237                  */
10238
10239                 if (!new_stream) {
10240                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10241                                         __func__, acrtc->base.base.id);
10242                         ret = -ENOMEM;
10243                         goto fail;
10244                 }
10245
10246                 /*
10247                  * TODO: Check VSDB bits to decide whether this should
10248                  * be enabled or not.
10249                  */
10250                 new_stream->triggered_crtc_reset.enabled =
10251                         dm->force_timing_sync;
10252
10253                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10254
10255                 ret = fill_hdr_info_packet(drm_new_conn_state,
10256                                            &new_stream->hdr_static_metadata);
10257                 if (ret)
10258                         goto fail;
10259
10260                 /*
10261                  * If we already removed the old stream from the context
10262                  * (and set the new stream to NULL) then we can't reuse
10263                  * the old stream even if the stream and scaling are unchanged.
10264                  * We'll hit the BUG_ON and black screen.
10265                  *
10266                  * TODO: Refactor this function to allow this check to work
10267                  * in all conditions.
10268                  */
10269                 if (dm_new_crtc_state->stream &&
10270                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10271                         goto skip_modeset;
10272
10273                 if (dm_new_crtc_state->stream &&
10274                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10275                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10276                         new_crtc_state->mode_changed = false;
10277                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10278                                          new_crtc_state->mode_changed);
10279                 }
10280         }
10281
10282         /* mode_changed flag may get updated above, need to check again */
10283         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10284                 goto skip_modeset;
10285
10286         DRM_DEBUG_ATOMIC(
10287                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10288                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10289                 "connectors_changed:%d\n",
10290                 acrtc->crtc_id,
10291                 new_crtc_state->enable,
10292                 new_crtc_state->active,
10293                 new_crtc_state->planes_changed,
10294                 new_crtc_state->mode_changed,
10295                 new_crtc_state->active_changed,
10296                 new_crtc_state->connectors_changed);
10297
10298         /* Remove stream for any changed/disabled CRTC */
10299         if (!enable) {
10300
10301                 if (!dm_old_crtc_state->stream)
10302                         goto skip_modeset;
10303
10304                 if (dm_new_crtc_state->stream &&
10305                     is_timing_unchanged_for_freesync(new_crtc_state,
10306                                                      old_crtc_state)) {
10307                         new_crtc_state->mode_changed = false;
10308                         DRM_DEBUG_DRIVER(
10309                                 "Mode change not required for front porch change, "
10310                                 "setting mode_changed to %d",
10311                                 new_crtc_state->mode_changed);
10312
10313                         set_freesync_fixed_config(dm_new_crtc_state);
10314
10315                         goto skip_modeset;
10316                 } else if (aconnector &&
10317                            is_freesync_video_mode(&new_crtc_state->mode,
10318                                                   aconnector)) {
10319                         struct drm_display_mode *high_mode;
10320
10321                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10322                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10323                                 set_freesync_fixed_config(dm_new_crtc_state);
10324                         }
10325                 }
10326
10327                 ret = dm_atomic_get_state(state, &dm_state);
10328                 if (ret)
10329                         goto fail;
10330
10331                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10332                                 crtc->base.id);
10333
10334                 /* i.e. reset mode */
10335                 if (dc_remove_stream_from_ctx(
10336                                 dm->dc,
10337                                 dm_state->context,
10338                                 dm_old_crtc_state->stream) != DC_OK) {
10339                         ret = -EINVAL;
10340                         goto fail;
10341                 }
10342
10343                 dc_stream_release(dm_old_crtc_state->stream);
10344                 dm_new_crtc_state->stream = NULL;
10345
10346                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10347
10348                 *lock_and_validation_needed = true;
10349
10350         } else {/* Add stream for any updated/enabled CRTC */
10351                 /*
10352                  * Quick fix to prevent a NULL pointer dereference on new_stream
10353                  * when added MST connectors are not found in the existing crtc_state
10354                  * in chained mode. TODO: need to dig out the root cause of that.
10355                  */
10356                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10357                         goto skip_modeset;
10358
10359                 if (modereset_required(new_crtc_state))
10360                         goto skip_modeset;
10361
10362                 if (modeset_required(new_crtc_state, new_stream,
10363                                      dm_old_crtc_state->stream)) {
10364
10365                         WARN_ON(dm_new_crtc_state->stream);
10366
10367                         ret = dm_atomic_get_state(state, &dm_state);
10368                         if (ret)
10369                                 goto fail;
10370
10371                         dm_new_crtc_state->stream = new_stream;
10372
10373                         dc_stream_retain(new_stream);
10374
10375                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10376                                          crtc->base.id);
10377
10378                         if (dc_add_stream_to_ctx(
10379                                         dm->dc,
10380                                         dm_state->context,
10381                                         dm_new_crtc_state->stream) != DC_OK) {
10382                                 ret = -EINVAL;
10383                                 goto fail;
10384                         }
10385
10386                         *lock_and_validation_needed = true;
10387                 }
10388         }
10389
10390 skip_modeset:
10391         /* Release extra reference */
10392         if (new_stream)
10393                 dc_stream_release(new_stream);
10394
10395         /*
10396          * We want to do dc stream updates that do not require a
10397          * full modeset below.
10398          */
10399         if (!(enable && aconnector && new_crtc_state->active))
10400                 return 0;
10401         /*
10402          * Given above conditions, the dc state cannot be NULL because:
10403          * 1. We're in the process of enabling CRTCs (just been added
10404          *    to the dc context, or already is on the context)
10405          * 2. Has a valid connector attached, and
10406          * 3. Is currently active and enabled.
10407          * => The dc stream state currently exists.
10408          */
10409         BUG_ON(dm_new_crtc_state->stream == NULL);
10410
10411         /* Scaling or underscan settings */
10412         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10413                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10414                 update_stream_scaling_settings(
10415                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10416
10417         /* ABM settings */
10418         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10419
10420         /*
10421          * Color management settings. We also update color properties
10422          * when a modeset is needed, to ensure it gets reprogrammed.
10423          */
10424         if (dm_new_crtc_state->base.color_mgmt_changed ||
10425             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10426                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10427                 if (ret)
10428                         goto fail;
10429         }
10430
10431         /* Update Freesync settings. */
10432         get_freesync_config_for_crtc(dm_new_crtc_state,
10433                                      dm_new_conn_state);
10434
10435         return ret;
10436
10437 fail:
10438         if (new_stream)
10439                 dc_stream_release(new_stream);
10440         return ret;
10441 }
10442
10443 static bool should_reset_plane(struct drm_atomic_state *state,
10444                                struct drm_plane *plane,
10445                                struct drm_plane_state *old_plane_state,
10446                                struct drm_plane_state *new_plane_state)
10447 {
10448         struct drm_plane *other;
10449         struct drm_plane_state *old_other_state, *new_other_state;
10450         struct drm_crtc_state *new_crtc_state;
10451         int i;
10452
10453         /*
10454          * TODO: Remove this hack once the checks below are sufficient
10455          * to determine when we need to reset all the planes on the
10456          * stream.
10457          */
10458         if (state->allow_modeset)
10459                 return true;
10460
10461         /* Exit early if we know that we're adding or removing the plane. */
10462         if (old_plane_state->crtc != new_plane_state->crtc)
10463                 return true;
10464
10465         /* old crtc == new_crtc == NULL, plane not in context. */
10466         if (!new_plane_state->crtc)
10467                 return false;
10468
10469         new_crtc_state =
10470                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10471
10472         if (!new_crtc_state)
10473                 return true;
10474
10475         /* CRTC Degamma changes currently require us to recreate planes. */
10476         if (new_crtc_state->color_mgmt_changed)
10477                 return true;
10478
10479         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10480                 return true;
10481
10482         /*
10483          * If there are any new primary or overlay planes being added or
10484          * removed then the z-order can potentially change. To ensure
10485          * correct z-order and pipe acquisition the current DC architecture
10486          * requires us to remove and recreate all existing planes.
10487          *
10488          * TODO: Come up with a more elegant solution for this.
10489          */
10490         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10491                 struct amdgpu_framebuffer *old_afb, *new_afb;

10492                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10493                         continue;
10494
10495                 if (old_other_state->crtc != new_plane_state->crtc &&
10496                     new_other_state->crtc != new_plane_state->crtc)
10497                         continue;
10498
10499                 if (old_other_state->crtc != new_other_state->crtc)
10500                         return true;
10501
10502                 /* Src/dst size and scaling updates. */
10503                 if (old_other_state->src_w != new_other_state->src_w ||
10504                     old_other_state->src_h != new_other_state->src_h ||
10505                     old_other_state->crtc_w != new_other_state->crtc_w ||
10506                     old_other_state->crtc_h != new_other_state->crtc_h)
10507                         return true;
10508
10509                 /* Rotation / mirroring updates. */
10510                 if (old_other_state->rotation != new_other_state->rotation)
10511                         return true;
10512
10513                 /* Blending updates. */
10514                 if (old_other_state->pixel_blend_mode !=
10515                     new_other_state->pixel_blend_mode)
10516                         return true;
10517
10518                 /* Alpha updates. */
10519                 if (old_other_state->alpha != new_other_state->alpha)
10520                         return true;
10521
10522                 /* Colorspace changes. */
10523                 if (old_other_state->color_range != new_other_state->color_range ||
10524                     old_other_state->color_encoding != new_other_state->color_encoding)
10525                         return true;
10526
10527                 /* Framebuffer checks fall at the end. */
10528                 if (!old_other_state->fb || !new_other_state->fb)
10529                         continue;
10530
10531                 /* Pixel format changes can require bandwidth updates. */
10532                 if (old_other_state->fb->format != new_other_state->fb->format)
10533                         return true;
10534
10535                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10536                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10537
10538                 /* Tiling and DCC changes also require bandwidth updates. */
10539                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10540                     old_afb->base.modifier != new_afb->base.modifier)
10541                         return true;
10542         }
10543
10544         return false;
10545 }
10546
10547 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10548                               struct drm_plane_state *new_plane_state,
10549                               struct drm_framebuffer *fb)
10550 {
10551         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10552         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10553         unsigned int pitch;
10554         bool linear;
10555
10556         if (fb->width > new_acrtc->max_cursor_width ||
10557             fb->height > new_acrtc->max_cursor_height) {
10558                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10559                                  fb->width, fb->height);
10561                 return -EINVAL;
10562         }
10563         if (new_plane_state->src_w != fb->width << 16 ||
10564             new_plane_state->src_h != fb->height << 16) {
10565                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10566                 return -EINVAL;
10567         }
10568
10569         /* Pitch in pixels */
10570         pitch = fb->pitches[0] / fb->format->cpp[0];
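              /* Worked example (assuming a 256-px-wide ARGB8888 cursor FB):
               * pitches[0] == 1024 bytes and cpp[0] == 4 bytes per pixel, so
               * the pitch computed above is 256 px. */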
10571
10572         if (fb->width != pitch) {
10573                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10574                                  fb->width, pitch);
10575                 return -EINVAL;
10576         }
10577
10578         switch (pitch) {
10579         case 64:
10580         case 128:
10581         case 256:
10582                 /* FB pitch is supported by cursor plane */
10583                 break;
10584         default:
10585                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10586                 return -EINVAL;
10587         }
10588
10589         /* Core DRM takes care of checking FB modifiers, so we only need to
10590          * check tiling flags when the FB doesn't have a modifier. */
10591         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
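                      /*
                       * Pre-GFX9 parts (family < AI) encode tiling in
                       * ARRAY_MODE/MICRO_TILE_MODE; GFX9 and later use
                       * SWIZZLE_MODE, where 0 means linear.
                       */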
10592                 if (adev->family < AMDGPU_FAMILY_AI) {
10593                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10594                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10595                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10596                 } else {
10597                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10598                 }
10599                 if (!linear) {
10600                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10601                         return -EINVAL;
10602                 }
10603         }
10604
10605         return 0;
10606 }
10607
10608 static int dm_update_plane_state(struct dc *dc,
10609                                  struct drm_atomic_state *state,
10610                                  struct drm_plane *plane,
10611                                  struct drm_plane_state *old_plane_state,
10612                                  struct drm_plane_state *new_plane_state,
10613                                  bool enable,
10614                                  bool *lock_and_validation_needed)
10615 {
10617         struct dm_atomic_state *dm_state = NULL;
10618         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10619         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10620         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10621         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10622         struct amdgpu_crtc *new_acrtc;
10623         bool needs_reset;
10624         int ret = 0;
10625
10627         new_plane_crtc = new_plane_state->crtc;
10628         old_plane_crtc = old_plane_state->crtc;
10629         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10630         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10631
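              /*
               * Cursor planes never receive a DC plane state in this driver;
               * only the position/FB checks below apply, and the cursor itself
               * is programmed through the stream's cursor attributes instead.
               */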
10632         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10633                 if (!enable || !new_plane_crtc ||
10634                     drm_atomic_plane_disabling(plane->state, new_plane_state))
10635                         return 0;
10636
10637                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10638
10639                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10640                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10641                         return -EINVAL;
10642                 }
10643
10644                 if (new_plane_state->fb) {
10645                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10646                                                  new_plane_state->fb);
10647                         if (ret)
10648                                 return ret;
10649                 }
10650
10651                 return 0;
10652         }
10653
10654         needs_reset = should_reset_plane(state, plane, old_plane_state,
10655                                          new_plane_state);
10656
10657         /* Remove any changed/removed planes */
10658         if (!enable) {
10659                 if (!needs_reset)
10660                         return 0;
10661
10662                 if (!old_plane_crtc)
10663                         return 0;
10664
10665                 old_crtc_state = drm_atomic_get_old_crtc_state(
10666                                 state, old_plane_crtc);
10667                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10668
10669                 if (!dm_old_crtc_state->stream)
10670                         return 0;
10671
10672                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10673                                 plane->base.id, old_plane_crtc->base.id);
10674
10675                 ret = dm_atomic_get_state(state, &dm_state);
10676                 if (ret)
10677                         return ret;
10678
10679                 if (!dc_remove_plane_from_context(
10680                                 dc,
10681                                 dm_old_crtc_state->stream,
10682                                 dm_old_plane_state->dc_state,
10683                                 dm_state->context)) {
10685                         return -EINVAL;
10686                 }
10687
10689                 dc_plane_state_release(dm_old_plane_state->dc_state);
10690                 dm_new_plane_state->dc_state = NULL;
10691
10692                 *lock_and_validation_needed = true;
10693
10694         } else { /* Add new planes */
10695                 struct dc_plane_state *dc_new_plane_state;
10696
10697                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10698                         return 0;
10699
10700                 if (!new_plane_crtc)
10701                         return 0;
10702
10703                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10704                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10705
10706                 if (!dm_new_crtc_state->stream)
10707                         return 0;
10708
10709                 if (!needs_reset)
10710                         return 0;
10711
10712                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10713                 if (ret)
10714                         return ret;
10715
10716                 WARN_ON(dm_new_plane_state->dc_state);
10717
10718                 dc_new_plane_state = dc_create_plane_state(dc);
10719                 if (!dc_new_plane_state)
10720                         return -ENOMEM;
10721
10722                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10723                                  plane->base.id, new_plane_crtc->base.id);
10724
10725                 ret = fill_dc_plane_attributes(
10726                         drm_to_adev(new_plane_crtc->dev),
10727                         dc_new_plane_state,
10728                         new_plane_state,
10729                         new_crtc_state);
10730                 if (ret) {
10731                         dc_plane_state_release(dc_new_plane_state);
10732                         return ret;
10733                 }
10734
10735                 ret = dm_atomic_get_state(state, &dm_state);
10736                 if (ret) {
10737                         dc_plane_state_release(dc_new_plane_state);
10738                         return ret;
10739                 }
10740
10741                 /*
10742                  * Any atomic check errors that occur after this will
10743                  * not need a release. The plane state will be attached
10744                  * to the stream, and therefore part of the atomic
10745                  * state. It'll be released when the atomic state is
10746                  * cleaned.
10747                  */
10748                 if (!dc_add_plane_to_context(
10749                                 dc,
10750                                 dm_new_crtc_state->stream,
10751                                 dc_new_plane_state,
10752                                 dm_state->context)) {
10754                         dc_plane_state_release(dc_new_plane_state);
10755                         return -EINVAL;
10756                 }
10757
10758                 dm_new_plane_state->dc_state = dc_new_plane_state;
10759
10760                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10761
10762                 /* Tell DC to do a full surface update every time there
10763                  * is a plane change. Inefficient, but works for now.
10764                  */
10765                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10766
10767                 *lock_and_validation_needed = true;
10768         }
10769
10771         return ret;
10772 }
10773
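      /*
       * src_w/src_h in drm_plane_state are 16.16 fixed point, hence the >> 16
       * below. For 90/270 degree rotations the source axes swap: e.g. a
       * 1920x1080 source rotated 90 degrees is reported as 1080x1920.
       */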
10774 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10775                                        int *src_w, int *src_h)
10776 {
10777         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10778         case DRM_MODE_ROTATE_90:
10779         case DRM_MODE_ROTATE_270:
10780                 *src_w = plane_state->src_h >> 16;
10781                 *src_h = plane_state->src_w >> 16;
10782                 break;
10783         case DRM_MODE_ROTATE_0:
10784         case DRM_MODE_ROTATE_180:
10785         default:
10786                 *src_w = plane_state->src_w >> 16;
10787                 *src_h = plane_state->src_h >> 16;
10788                 break;
10789         }
10790 }
10791
10792 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10793                                 struct drm_crtc *crtc,
10794                                 struct drm_crtc_state *new_crtc_state)
10795 {
10796         struct drm_plane *cursor = crtc->cursor, *underlying;
10797         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10798         int i;
10799         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10800         int cursor_src_w, cursor_src_h;
10801         int underlying_src_w, underlying_src_h;
10802
10803         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10804          * cursor per pipe but it's going to inherit the scaling and
10805          * positioning from the underlying pipe. Check that the cursor plane's
10806          * scaling matches the underlying planes'. */
10807
10808         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10809         if (!new_cursor_state || !new_cursor_state->fb)
10810                 return 0;
10812
10813         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10814         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10815         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
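              /* Scale factors are in thousandths: e.g. a 64x64 cursor source
               * shown at 128x128 gives cursor_scale_w == cursor_scale_h == 2000.
               */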
10816
10817         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10818                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10819                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10820                         continue;
10821
10822                 /* Ignore disabled planes */
10823                 if (!new_underlying_state->fb)
10824                         continue;
10825
10826                 dm_get_oriented_plane_size(new_underlying_state,
10827                                            &underlying_src_w, &underlying_src_h);
10828                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10829                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10830
10831                 if (cursor_scale_w != underlying_scale_w ||
10832                     cursor_scale_h != underlying_scale_h) {
10833                         drm_dbg_atomic(crtc->dev,
10834                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10835                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10836                         return -EINVAL;
10837                 }
10838
10839                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10840                 if (new_underlying_state->crtc_x <= 0 &&
10841                     new_underlying_state->crtc_y <= 0 &&
10842                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10843                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10844                         break;
10845         }
10846
10847         return 0;
10848 }
10849
10850 #if defined(CONFIG_DRM_AMD_DC_DCN)
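      /*
       * A modeset on one MST/DSC display can change the DSC configuration, and
       * with it the bandwidth, of every stream sharing the same MST link, so
       * pull every CRTC on that topology into the state for re-validation.
       */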
10851 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10852 {
10853         struct drm_connector *connector;
10854         struct drm_connector_state *conn_state;
10855         struct amdgpu_dm_connector *aconnector = NULL;
10856         int i;

10857         for_each_new_connector_in_state(state, connector, conn_state, i) {
10858                 if (conn_state->crtc != crtc)
10859                         continue;
10860
10861                 aconnector = to_amdgpu_dm_connector(connector);
10862                 if (!aconnector->port || !aconnector->mst_port)
10863                         aconnector = NULL;
10864                 else
10865                         break;
10866         }
10867
10868         if (!aconnector)
10869                 return 0;
10870
10871         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10872 }
10873 #endif
10874
10875 /**
10876  * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
10877  * @dev: The DRM device
10878  * @state: The atomic state to commit
10879  *
10880  * Validate that the given atomic state is programmable by DC into hardware.
10881  * This involves constructing a &struct dc_state reflecting the new hardware
10882  * state we wish to commit, then querying DC to see if it is programmable. It's
10883  * important not to modify the existing DC state. Otherwise, atomic_check
10884  * may unexpectedly commit hardware changes.
10885  *
10886  * When validating the DC state, it's important that the right locks are
10887  * acquired. For the full update case, which removes/adds/updates streams on
10888  * one CRTC while flipping on another, acquiring the global lock guarantees
10889  * that any such full update commit will wait for completion of any outstanding
10890  * flip using DRM's synchronization events.
10891  *
10892  * Note that DM adds the affected connectors for all CRTCs in the state, even
10893  * when that might not seem necessary. This is because DC stream creation
10894  * requires the DC sink, which is tied to the DRM connector state. Cleaning
10895  * this up should be possible but non-trivial - a possible TODO item.
10896  *
10897  * Return: 0 on success, or a negative error code if validation failed.
10898  */
10899 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10900                                   struct drm_atomic_state *state)
10901 {
10902         struct amdgpu_device *adev = drm_to_adev(dev);
10903         struct dm_atomic_state *dm_state = NULL;
10904         struct dc *dc = adev->dm.dc;
10905         struct drm_connector *connector;
10906         struct drm_connector_state *old_con_state, *new_con_state;
10907         struct drm_crtc *crtc;
10908         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10909         struct drm_plane *plane;
10910         struct drm_plane_state *old_plane_state, *new_plane_state;
10911         enum dc_status status;
10912         int ret, i;
10913         bool lock_and_validation_needed = false;
10914         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10915 #if defined(CONFIG_DRM_AMD_DC_DCN)
10916         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10917         struct drm_dp_mst_topology_state *mst_state;
10918         struct drm_dp_mst_topology_mgr *mgr;
10919 #endif
10920
10921         trace_amdgpu_dm_atomic_check_begin(state);
10922
10923         ret = drm_atomic_helper_check_modeset(dev, state);
10924         if (ret) {
10925                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10926                 goto fail;
10927         }
10928
10929         /* Check connector changes */
10930         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10931                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10932                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10933
10934                 /* Skip connectors that are disabled or part of modeset already. */
10935                 if (!old_con_state->crtc && !new_con_state->crtc)
10936                         continue;
10937
10938                 if (!new_con_state->crtc)
10939                         continue;
10940
10941                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10942                 if (IS_ERR(new_crtc_state)) {
10943                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10944                         ret = PTR_ERR(new_crtc_state);
10945                         goto fail;
10946                 }
10947
10948                 if (dm_old_con_state->abm_level !=
10949                     dm_new_con_state->abm_level)
10950                         new_crtc_state->connectors_changed = true;
10951         }
10952
10953 #if defined(CONFIG_DRM_AMD_DC_DCN)
10954         if (dc_resource_is_dsc_encoding_supported(dc)) {
10955                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10956                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10957                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10958                                 if (ret) {
10959                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10960                                         goto fail;
10961                                 }
10962                         }
10963                 }
10964         }
10965 #endif
10966         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10967                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10968
10969                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10970                     !new_crtc_state->color_mgmt_changed &&
10971                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10972                     !dm_old_crtc_state->dsc_force_changed)
10973                         continue;
10974
10975                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10976                 if (ret) {
10977                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10978                         goto fail;
10979                 }
10980
10981                 if (!new_crtc_state->enable)
10982                         continue;
10983
10984                 ret = drm_atomic_add_affected_connectors(state, crtc);
10985                 if (ret) {
10986                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10987                         goto fail;
10988                 }
10989
10990                 ret = drm_atomic_add_affected_planes(state, crtc);
10991                 if (ret) {
10992                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10993                         goto fail;
10994                 }
10995
10996                 if (dm_old_crtc_state->dsc_force_changed)
10997                         new_crtc_state->mode_changed = true;
10998         }
10999
11000         /*
11001          * Add all primary and overlay planes on the CRTC to the state
11002          * whenever a plane is enabled to maintain correct z-ordering
11003          * and to enable fast surface updates.
11004          */
11005         drm_for_each_crtc(crtc, dev) {
11006                 bool modified = false;
11007
11008                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11009                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11010                                 continue;
11011
11012                         if (new_plane_state->crtc == crtc ||
11013                             old_plane_state->crtc == crtc) {
11014                                 modified = true;
11015                                 break;
11016                         }
11017                 }
11018
11019                 if (!modified)
11020                         continue;
11021
11022                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11023                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11024                                 continue;
11025
11026                         new_plane_state =
11027                                 drm_atomic_get_plane_state(state, plane);
11028
11029                         if (IS_ERR(new_plane_state)) {
11030                                 ret = PTR_ERR(new_plane_state);
11031                                 DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
11032                                 goto fail;
11033                         }
11034                 }
11035         }
11036
11037         /* Remove existing planes if they are modified */
11038         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11039                 ret = dm_update_plane_state(dc, state, plane,
11040                                             old_plane_state,
11041                                             new_plane_state,
11042                                             false,
11043                                             &lock_and_validation_needed);
11044                 if (ret) {
11045                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11046                         goto fail;
11047                 }
11048         }
11049
11050         /* Disable all crtcs which require disable */
11051         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11052                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11053                                            old_crtc_state,
11054                                            new_crtc_state,
11055                                            false,
11056                                            &lock_and_validation_needed);
11057                 if (ret) {
11058                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11059                         goto fail;
11060                 }
11061         }
11062
11063         /* Enable all crtcs which require enable */
11064         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11065                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11066                                            old_crtc_state,
11067                                            new_crtc_state,
11068                                            true,
11069                                            &lock_and_validation_needed);
11070                 if (ret) {
11071                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11072                         goto fail;
11073                 }
11074         }
11075
11076         /* Add new/modified planes */
11077         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11078                 ret = dm_update_plane_state(dc, state, plane,
11079                                             old_plane_state,
11080                                             new_plane_state,
11081                                             true,
11082                                             &lock_and_validation_needed);
11083                 if (ret) {
11084                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11085                         goto fail;
11086                 }
11087         }
11088
11089         /* Run this here since we want to validate the streams we created */
11090         ret = drm_atomic_helper_check_planes(dev, state);
11091         if (ret) {
11092                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11093                 goto fail;
11094         }
11095
11096         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11097                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11098                 if (dm_new_crtc_state->mpo_requested)
11099                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11100         }
11101
11102         /* Check cursor planes scaling */
11103         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11104                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11105                 if (ret) {
11106                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11107                         goto fail;
11108                 }
11109         }
11110
11111         if (state->legacy_cursor_update) {
11112                 /*
11113                  * This is a fast cursor update coming from the plane update
11114                  * helper, check if it can be done asynchronously for better
11115                  * performance.
11116                  */
11117                 state->async_update =
11118                         !drm_atomic_helper_async_check(dev, state);
11119
11120                 /*
11121                  * Skip the remaining global validation if this is an async
11122                  * update. Cursor updates can be done without affecting
11123                  * state or bandwidth calcs and this avoids the performance
11124                  * penalty of locking the private state object and
11125                  * allocating a new dc_state.
11126                  */
11127                 if (state->async_update)
11128                         return 0;
11129         }
11130
11131         /* Check scaling and underscan changes */
11132         /* TODO: Scaling-change validation was removed because a new stream
11133          * cannot be committed into the context without causing a full reset.
11134          * Need to decide how to handle this.
11135          */
11136         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11137                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11138                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11139                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11140
11141                 /* Skip any modesets/resets */
11142                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11143                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11144                         continue;
11145
11146                 /* Skip anything that is not a scaling or underscan change */
11147                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11148                         continue;
11149
11150                 lock_and_validation_needed = true;
11151         }
11152
11153 #if defined(CONFIG_DRM_AMD_DC_DCN)
11154         /* set the slot info for each mst_state based on the link encoding format */
11155         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11156                 struct amdgpu_dm_connector *aconnector;
11157                 struct drm_connector *connector;
11158                 struct drm_connector_list_iter iter;
11159                 u8 link_coding_cap;
11160
11161                 if (!mgr->mst_state)
11162                         continue;
11163
11164                 drm_connector_list_iter_begin(dev, &iter);
11165                 drm_for_each_connector_iter(connector, &iter) {
11166                         int id = connector->index;
11167
11168                         if (id == mst_state->mgr->conn_base_id) {
11169                                 aconnector = to_amdgpu_dm_connector(connector);
11170                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11171                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11172
11173                                 break;
11174                         }
11175                 }
11176                 drm_connector_list_iter_end(&iter);
11178         }
11179 #endif
11180         /*
11181          * Streams and planes are reset when there are changes that affect
11182          * bandwidth. Anything that affects bandwidth needs to go through
11183          * DC global validation to ensure that the configuration can be applied
11184          * to hardware.
11185          *
11186          * We have to currently stall out here in atomic_check for outstanding
11187          * commits to finish in this case because our IRQ handlers reference
11188          * DRM state directly - we can end up disabling interrupts too early
11189          * if we don't.
11190          *
11191          * TODO: Remove this stall and drop DM state private objects.
11192          */
11193         if (lock_and_validation_needed) {
11194                 ret = dm_atomic_get_state(state, &dm_state);
11195                 if (ret) {
11196                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11197                         goto fail;
11198                 }
11199
11200                 ret = do_aquire_global_lock(dev, state);
11201                 if (ret) {
11202                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11203                         goto fail;
11204                 }
11205
11206 #if defined(CONFIG_DRM_AMD_DC_DCN)
11207                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11208                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                              ret = -EINVAL;
11209                         goto fail;
11210                 }
11211
11212                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11213                 if (ret) {
11214                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11215                         goto fail;
11216                 }
11217 #endif
11218
11219                 /*
11220                  * Perform validation of MST topology in the state:
11221                  * We need to perform MST atomic check before calling
11222                  * dc_validate_global_state(), or there is a chance
11223                  * to get stuck in an infinite loop and hang eventually.
11224                  */
11225                 ret = drm_dp_mst_atomic_check(state);
11226                 if (ret) {
11227                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11228                         goto fail;
11229                 }
11230                 status = dc_validate_global_state(dc, dm_state->context, true);
11231                 if (status != DC_OK) {
11232                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11233                                          dc_status_to_str(status), status);
11234                         ret = -EINVAL;
11235                         goto fail;
11236                 }
11237         } else {
11238                 /*
11239                  * The commit is a fast update. Fast updates shouldn't change
11240                  * the DC context, affect global validation, and can have their
11241                  * commit work done in parallel with other commits not touching
11242                  * the same resource. If we have a new DC context as part of
11243                  * the DM atomic state from validation we need to free it and
11244                  * retain the existing one instead.
11245                  *
11246                  * Furthermore, since the DM atomic state only contains the DC
11247                  * context and can safely be annulled, we can free the state
11248                  * and clear the associated private object now to free
11249                  * some memory and avoid a possible use-after-free later.
11250                  */
11251
11252                 for (i = 0; i < state->num_private_objs; i++) {
11253                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11254
11255                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11256                                 int j = state->num_private_objs - 1;
11257
11258                                 dm_atomic_destroy_state(obj,
11259                                                 state->private_objs[i].state);
11260
11261                                 /* If i is not at the end of the array then the
11262                                  * last element needs to be moved to where i was
11263                                  * before the array can safely be truncated.
11264                                  */
11265                                 if (i != j)
11266                                         state->private_objs[i] =
11267                                                 state->private_objs[j];
11268
11269                                 state->private_objs[j].ptr = NULL;
11270                                 state->private_objs[j].state = NULL;
11271                                 state->private_objs[j].old_state = NULL;
11272                                 state->private_objs[j].new_state = NULL;
11273
11274                                 state->num_private_objs = j;
11275                                 break;
11276                         }
11277                 }
11278         }
11279
11280         /* Store the overall update type for use later in atomic check. */
11281         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11282                 struct dm_crtc_state *dm_new_crtc_state =
11283                         to_dm_crtc_state(new_crtc_state);
11284
11285                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11286                                                          UPDATE_TYPE_FULL :
11287                                                          UPDATE_TYPE_FAST;
11288         }
11289
11290         /* Must have succeeded by this point */
11291         WARN_ON(ret);
11292
11293         trace_amdgpu_dm_atomic_check_finish(state, ret);
11294
11295         return ret;
11296
11297 fail:
11298         if (ret == -EDEADLK)
11299                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11300         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11301                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11302         else
11303                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11304
11305         trace_amdgpu_dm_atomic_check_finish(state, ret);
11306
11307         return ret;
11308 }
11309
11310 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11311                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11312 {
11313         uint8_t dpcd_data;
11314         bool capable = false;
11315
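              /*
               * DP_DOWN_STREAM_PORT_COUNT (DPCD 0x007) also carries the
               * MSA_TIMING_PAR_IGNORED bit: the sink can regenerate its timing
               * without the MSA, which the FreeSync path checks for on DP sinks.
               */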
11316         if (amdgpu_dm_connector->dc_link &&
11317                 dm_helpers_dp_read_dpcd(
11318                                 NULL,
11319                                 amdgpu_dm_connector->dc_link,
11320                                 DP_DOWN_STREAM_PORT_COUNT,
11321                                 &dpcd_data,
11322                                 sizeof(dpcd_data))) {
11323                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11324         }
11325
11326         return capable;
11327 }
11328
11329 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11330                 unsigned int offset,
11331                 unsigned int total_length,
11332                 uint8_t *data,
11333                 unsigned int length,
11334                 struct amdgpu_hdmi_vsdb_info *vsdb)
11335 {
11336         bool res;
11337         union dmub_rb_cmd cmd;
11338         struct dmub_cmd_send_edid_cea *input;
11339         struct dmub_cmd_edid_cea_output *output;
11340
11341         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11342                 return false;
11343
11344         memset(&cmd, 0, sizeof(cmd));
11345
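              /*
               * Build a DMUB EDID_CEA command: the caller's chunk is copied into
               * the command payload along with its offset into, and the total
               * length of, the CEA block being parsed.
               */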
11346         input = &cmd.edid_cea.data.input;
11347
11348         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11349         cmd.edid_cea.header.sub_type = 0;
11350         cmd.edid_cea.header.payload_bytes =
11351                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11352         input->offset = offset;
11353         input->length = length;
11354         input->cea_total_length = total_length;
11355         memcpy(input->payload, data, length);
11356
11357         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11358         if (!res) {
11359                 DRM_ERROR("EDID CEA parser failed\n");
11360                 return false;
11361         }
11362
11363         output = &cmd.edid_cea.data.output;
11364
11365         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11366                 if (!output->ack.success) {
11367                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11368                                         output->ack.offset);
11369                 }
11370         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11371                 if (!output->amd_vsdb.vsdb_found)
11372                         return false;
11373
11374                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11375                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11376                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11377                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11378         } else {
11379                 DRM_WARN("Unknown EDID CEA parser results\n");
11380                 return false;
11381         }
11382
11383         return true;
11384 }
11385
11386 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11387                 uint8_t *edid_ext, int len,
11388                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11389 {
11390         int i;
11391
11392         /* send extension block to DMCU for parsing */
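              /* e.g. a 128-byte CEA extension block goes out as 16 chunks of 8 bytes */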
11393         for (i = 0; i < len; i += 8) {
11394                 bool res;
11395                 int offset;
11396
11397                 /* send 8 bytes at a time */
11398                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11399                         return false;
11400
11401                 if (i + 8 == len) {
11402                         /* EDID block fully sent, expect the parse result */
11403                         int version, min_rate, max_rate;
11404
11405                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11406                         if (res) {
11407                                 /* amd vsdb found */
11408                                 vsdb_info->freesync_supported = 1;
11409                                 vsdb_info->amd_vsdb_version = version;
11410                                 vsdb_info->min_refresh_rate_hz = min_rate;
11411                                 vsdb_info->max_refresh_rate_hz = max_rate;
11412                                 return true;
11413                         }
11414                         /* not amd vsdb */
11415                         return false;
11416                 }
11417
11418                 /* check for ack */
11419                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11420                 if (!res)
11421                         return false;
11422         }
11423
11424         return false;
11425 }
11426
11427 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11428                 uint8_t *edid_ext, int len,
11429                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11430 {
11431         int i;
11432
11433         /* send extension block to DMUB for parsing */
11434         for (i = 0; i < len; i += 8) {
11435                 /* send 8 bytes at a time */
11436                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11437                         return false;
11438         }
11439
11440         return vsdb_info->freesync_supported;
11441 }
11442
11443 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11444                 uint8_t *edid_ext, int len,
11445                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11446 {
11447         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11448
11449         if (adev->dm.dmub_srv)
11450                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11451         else
11452                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11453 }
11454
11455 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11456                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11457 {
11458         uint8_t *edid_ext = NULL;
11459         int i;
11460         bool valid_vsdb_found = false;
11461
11462         /*----- drm_find_cea_extension() -----*/
11463         /* No EDID or EDID extensions */
11464         if (edid == NULL || edid->extensions == 0)
11465                 return -ENODEV;
11466
11467         /* Find CEA extension */
11468         for (i = 0; i < edid->extensions; i++) {
11469                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11470                 if (edid_ext[0] == CEA_EXT)
11471                         break;
11472         }
11473
11474         if (i == edid->extensions)
11475                 return -ENODEV;
11476
11477         /*----- cea_db_offsets() -----*/
11478         if (edid_ext[0] != CEA_EXT)
11479                 return -ENODEV;
11480
11481         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11482
11483         return valid_vsdb_found ? i : -ENODEV;
11484 }
11485
11486 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11487                                         struct edid *edid)
11488 {
11489         int i = 0;
11490         struct detailed_timing *timing;
11491         struct detailed_non_pixel *data;
11492         struct detailed_data_monitor_range *range;
11493         struct amdgpu_dm_connector *amdgpu_dm_connector =
11494                         to_amdgpu_dm_connector(connector);
11495         struct dm_connector_state *dm_con_state = NULL;
11496         struct dc_sink *sink;
11497
11498         struct drm_device *dev = connector->dev;
11499         struct amdgpu_device *adev = drm_to_adev(dev);
11500         bool freesync_capable = false;
11501         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11502
11503         if (!connector->state) {
11504                 DRM_ERROR("%s - Connector has no state\n", __func__);
11505                 goto update;
11506         }
11507
11508         sink = amdgpu_dm_connector->dc_sink ?
11509                 amdgpu_dm_connector->dc_sink :
11510                 amdgpu_dm_connector->dc_em_sink;
11511
11512         if (!edid || !sink) {
11513                 dm_con_state = to_dm_connector_state(connector->state);
11514
11515                 amdgpu_dm_connector->min_vfreq = 0;
11516                 amdgpu_dm_connector->max_vfreq = 0;
11517                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11518                 connector->display_info.monitor_range.min_vfreq = 0;
11519                 connector->display_info.monitor_range.max_vfreq = 0;
11520                 freesync_capable = false;
11521
11522                 goto update;
11523         }
11524
11525         dm_con_state = to_dm_connector_state(connector->state);
11526
11527         if (!adev->dm.freesync_module)
11528                 goto update;
11529
11531         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11532                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11533                 bool edid_check_required = false;
11534
11535                 if (edid) {
11536                         edid_check_required = is_dp_capable_without_timing_msa(
11537                                                 adev->dm.dc,
11538                                                 amdgpu_dm_connector);
11539                 }
11540
11541                 if (edid_check_required && (edid->version > 1 ||
11542                     (edid->version == 1 && edid->revision > 1))) {
11543                         for (i = 0; i < 4; i++) {
11545                                 timing  = &edid->detailed_timings[i];
11546                                 data    = &timing->data.other_data;
11547                                 range   = &data->data.range;
11548                                 /*
11549                                  * Check if monitor has continuous frequency mode
11550                                  */
11551                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11552                                         continue;
11553                                 /*
11554                                  * Check for flag range limits only. If flag == 1 then
11555                                  * no additional timing information provided.
11556                                  * Default GTF, GTF Secondary curve and CVT are not
11557                                  * supported
11558                                  */
11559                                 if (range->flags != 1)
11560                                         continue;
11561
11562                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11563                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
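                                      /* EDID stores the max pixel clock in 10 MHz units */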
11564                                 amdgpu_dm_connector->pixel_clock_mhz =
11565                                         range->pixel_clock_mhz * 10;
11566
11567                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11568                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11569
11570                                 break;
11571                         }
11572
11573                         if (amdgpu_dm_connector->max_vfreq -
11574                             amdgpu_dm_connector->min_vfreq > 10)
11575                                 freesync_capable = true;
11578                 }
11579         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11580                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11581                 if (i >= 0 && vsdb_info.freesync_supported) {
11582                         timing  = &edid->detailed_timings[i];
11583                         data    = &timing->data.other_data;
11584
11585                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11586                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11587                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11588                                 freesync_capable = true;
11589
11590                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11591                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11592                 }
11593         }
11594
11595 update:
11596         if (dm_con_state)
11597                 dm_con_state->freesync_capable = freesync_capable;
11598
11599         if (connector->vrr_capable_property)
11600                 drm_connector_set_vrr_capable_property(connector,
11601                                                        freesync_capable);
11602 }
11603
11604 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11605 {
11606         struct amdgpu_device *adev = drm_to_adev(dev);
11607         struct dc *dc = adev->dm.dc;
11608         int i;
11609
11610         mutex_lock(&adev->dm.dc_lock);
11611         if (dc->current_state) {
11612                 for (i = 0; i < dc->current_state->stream_count; ++i)
11613                         dc->current_state->streams[i]
11614                                 ->triggered_crtc_reset.enabled =
11615                                 adev->dm.force_timing_sync;
11616
11617                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11618                 dc_trigger_sync(dc, dc->current_state);
11619         }
11620         mutex_unlock(&adev->dm.dc_lock);
11621 }
11622
11623 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11624                        uint32_t value, const char *func_name)
11625 {
11626 #ifdef DM_CHECK_ADDR_0
11627         if (address == 0) {
11628                 DC_ERR("invalid register write. address = 0\n");
11629                 return;
11630         }
11631 #endif
11632         cgs_write_register(ctx->cgs_device, address, value);
11633         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11634 }
11635
11636 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11637                           const char *func_name)
11638 {
11639         uint32_t value;
11640 #ifdef DM_CHECK_ADDR_0
11641         if (address == 0) {
11642                 DC_ERR("invalid register read; address = 0\n");
11643                 return 0;
11644         }
11645 #endif
11646
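              /*
               * Register reads are not supported while a DMUB register-write
               * gather is in progress (unless burst writes are enabled), so
               * assert and return 0 instead.
               */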
11647         if (ctx->dmub_srv &&
11648             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11649             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11650                 ASSERT(false);
11651                 return 0;
11652         }
11653
11654         value = cgs_read_register(ctx->cgs_device, address);
11655
11656         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11657
11658         return value;
11659 }
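
/*
 * Minimal read-modify-write sketch built on the two register helpers
 * above (illustrative only; dm_update_reg_field is not a real symbol).
 */
static void dm_update_reg_field(const struct dc_context *ctx,
                                uint32_t addr, uint32_t mask, uint32_t val)
{
        uint32_t reg = dm_read_reg_func(ctx, addr, __func__);

        /* clear the field, then OR in the new value */
        reg = (reg & ~mask) | (val & mask);
        dm_write_reg_func(ctx, addr, reg, __func__);
}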
11660
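/*
 * Map a DMUB async-to-sync completion status onto the value returned to
 * the caller: the AUX reply length (or 0 for SET_CONFIG) on success, -1
 * on timeout or failure, with *operation_result carrying the detail.
 */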
11661 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11662                                                 struct dc_context *ctx,
11663                                                 uint8_t status_type,
11664                                                 uint32_t *operation_result)
11665 {
11666         struct amdgpu_device *adev = ctx->driver_context;
11667         int return_status = -1;
11668         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11669
11670         if (is_cmd_aux) {
11671                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11672                         return_status = p_notify->aux_reply.length;
11673                         *operation_result = p_notify->result;
11674                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11675                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11676                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11677                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11678                 } else {
11679                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11680                 }
11681         } else {
11682                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11683                         return_status = 0;
11684                         *operation_result = p_notify->sc_status;
11685                 } else {
11686                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11687                 }
11688         }
11689
11690         return return_status;
11691 }
11692
11693 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11694         unsigned int link_index, void *cmd_payload, void *operation_result)
11695 {
11696         struct amdgpu_device *adev = ctx->driver_context;
11697         int ret = 0;
11698
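        /*
         * Kick off the request. AUX always completes through the
         * dmub_aux_transfer_done completion below; SET_CONFIG may be
         * answered immediately, in which case we can return right away.
         */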
11699         if (is_cmd_aux) {
11700                 dc_process_dmub_aux_transfer_async(ctx->dc,
11701                         link_index, (struct aux_payload *)cmd_payload);
11702         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11703                                         (struct set_config_cmd_payload *)cmd_payload,
11704                                         adev->dm.dmub_notify)) {
11705                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11706                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11707                                         (uint32_t *)operation_result);
11708         }
11709
11710         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11711         if (ret == 0) {
11712                 DRM_ERROR("wait_for_completion_timeout timed out!");
11713                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11714                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11715                                 (uint32_t *)operation_result);
11716         }
11717
11718         if (is_cmd_aux) {
11719                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11720                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11721
11722                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11723                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11724                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11725                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11726                                        adev->dm.dmub_notify->aux_reply.length);
11727                         }
11728                 }
11729         }
11730
11731         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11732                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11733                         (uint32_t *)operation_result);
11734 }
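
/*
 * Illustrative caller (a sketch; dm_dmub_read_dpcd_rev is not a real
 * symbol): a one-byte native AUX read of DPCD address 0x00000 through
 * the synchronous wrapper above. Field names follow struct aux_payload.
 */
static int dm_dmub_read_dpcd_rev(struct dc_context *ctx, unsigned int link_index)
{
        uint8_t data = 0, reply = 0;
        uint32_t op_result = 0;
        struct aux_payload payload = {
                .address = 0x00000,     /* DPCD_REV */
                .data = &data,
                .length = 1,
                .reply = &reply,
                .write = false,
                .i2c_over_aux = false,
        };
        int ret;

        ret = amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx, link_index,
                                                       &payload, &op_result);
        /* on success ret is the reply length and data holds the DPCD byte */
        return ret < 0 ? ret : data;
}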
11735
11736 /*
11737  * Check whether seamless boot is supported.
11738  *
11739  * So far we only support seamless boot on CHIP_VANGOGH.
11740  * If everything goes well, we may consider expanding
11741  * seamless boot to other ASICs.
11742  */
11743 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11744 {
11745         switch (adev->asic_type) {
11746         case CHIP_VANGOGH:
11747                 if (!adev->mman.keep_stolen_vga_memory)
11748                         return true;
11749                 break;
11750         default:
11751                 break;
11752         }
11753
11754         return false;
11755 }
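
/*
 * Sketch of how init code might consume the check above (an assumption,
 * not verified against this tree; the flag name on struct dc_init_data
 * is illustrative):
 */
static void dm_apply_seamless_boot(struct amdgpu_device *adev,
                                   struct dc_init_data *init_data)
{
        if (check_seamless_boot_capability(adev)) {
                /* keep the boot-time display state instead of powering down */
                init_data->flags.power_down_display_on_boot = false;
                DRM_INFO("Seamless boot condition check passed\n");
        }
}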