linux.git (Linux 5.16-rc1): drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54
55 #include "amd_shared.h"
56 #include "amdgpu_dm_irq.h"
57 #include "dm_helpers.h"
58 #include "amdgpu_dm_mst_types.h"
59 #if defined(CONFIG_DEBUG_FS)
60 #include "amdgpu_dm_debugfs.h"
61 #endif
62 #include "amdgpu_dm_psr.h"
63
64 #include "ivsrcid/ivsrcid_vislands30.h"
65
66 #include "i2caux_interface.h"
67 #include <linux/module.h>
68 #include <linux/moduleparam.h>
69 #include <linux/types.h>
70 #include <linux/pm_runtime.h>
71 #include <linux/pci.h>
72 #include <linux/firmware.h>
73 #include <linux/component.h>
74
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
84
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
92
93 #include "soc15_common.h"
94 #endif
95
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116
117 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119
120 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
128
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
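
/*
 * Illustrative sketch only (not driver code): the liaison pattern described
 * above generally reduces to a DRM hook that translates its arguments into a
 * DC call and maps the DC status back onto a DRM errno. dm_example_hook(),
 * dc_example_call() and map_dc_status() below are hypothetical names.
 */
#if 0
static int dm_example_hook(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	enum dc_status status;

	/* 1. Convert the DRM request into a DC request. */
	status = dc_example_call(dc, state);	/* hypothetical DC entry point */

	/* 2. Convert the DC response into a DRM response. */
	return map_dc_status(status);		/* hypothetical mapping helper */
}
#endif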
138
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
143
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 {
146         switch (link->dpcd_caps.dongle_type) {
147         case DISPLAY_DONGLE_NONE:
148                 return DRM_MODE_SUBCONNECTOR_Native;
149         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150                 return DRM_MODE_SUBCONNECTOR_VGA;
151         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152         case DISPLAY_DONGLE_DP_DVI_DONGLE:
153                 return DRM_MODE_SUBCONNECTOR_DVID;
154         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156                 return DRM_MODE_SUBCONNECTOR_HDMIA;
157         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158         default:
159                 return DRM_MODE_SUBCONNECTOR_Unknown;
160         }
161 }
162
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 {
165         struct dc_link *link = aconnector->dc_link;
166         struct drm_connector *connector = &aconnector->base;
167         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168
169         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170                 return;
171
172         if (aconnector->dc_sink)
173                 subconnector = get_subconnector_type(link);
174
175         drm_object_property_set_value(&connector->base,
176                         connector->dev->mode_config.dp_subconnector_property,
177                         subconnector);
178 }
179
180 /*
181  * Initializes drm_device display-related structures, based on the information
182  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
183  * drm_encoder, and drm_mode_config.
184  *
185  * Returns 0 on success
186  */
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* removes and deallocates the drm structures, created by the above function */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192                                 struct drm_plane *plane,
193                                 unsigned long possible_crtcs,
194                                 const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196                                struct drm_plane *plane,
197                                uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
200                                     uint32_t link_index,
201                                     struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203                                   struct amdgpu_encoder *aencoder,
204                                   uint32_t link_index);
205
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211                                   struct drm_atomic_state *state);
212
213 static void handle_cursor_update(struct drm_plane *plane,
214                                  struct drm_plane_state *old_plane_state);
215
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
218
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
220 static void handle_hpd_rx_irq(void *param);
221
222 static bool
223 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
224                                  struct drm_crtc_state *new_crtc_state);
225 /*
226  * dm_vblank_get_counter
227  *
228  * @brief
229  * Get counter for number of vertical blanks
230  *
231  * @param
232  * struct amdgpu_device *adev - [in] desired amdgpu device
233  * int crtc - [in] index of the CRTC to get the counter from
234  *
235  * @return
236  * Counter for vertical blanks
237  */
238 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
239 {
240         if (crtc >= adev->mode_info.num_crtc)
241                 return 0;
242         else {
243                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
244
245                 if (acrtc->dm_irq_params.stream == NULL) {
246                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
247                                   crtc);
248                         return 0;
249                 }
250
251                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
252         }
253 }
254
255 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
256                                   u32 *vbl, u32 *position)
257 {
258         uint32_t v_blank_start, v_blank_end, h_position, v_position;
259
260         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
261                 return -EINVAL;
262         else {
263                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
264
265                 if (acrtc->dm_irq_params.stream == NULL) {
266                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
267                                   crtc);
268                         return 0;
269                 }
270
271                 /*
272                  * TODO rework base driver to use values directly.
273                  * for now parse it back into reg-format
274                  */
275                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
276                                          &v_blank_start,
277                                          &v_blank_end,
278                                          &h_position,
279                                          &v_position);
280
281                 *position = v_position | (h_position << 16);
282                 *vbl = v_blank_start | (v_blank_end << 16);
283         }
284
285         return 0;
286 }
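
/*
 * A minimal sketch (not driver code) of how a caller unpacks the reg-format
 * values produced by dm_crtc_get_scanoutpos() above: the low 16 bits carry
 * the vertical field and the high 16 bits the horizontal/end field.
 */
#if 0
static void dm_example_unpack_scanoutpos(u32 position, u32 vbl)
{
	u32 v_position = position & 0xffff;
	u32 h_position = position >> 16;
	u32 v_blank_start = vbl & 0xffff;
	u32 v_blank_end = vbl >> 16;

	pr_debug("scanout v=%u h=%u, vblank=[%u..%u]\n",
		 v_position, h_position, v_blank_start, v_blank_end);
}
#endif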
287
288 static bool dm_is_idle(void *handle)
289 {
290         /* XXX todo */
291         return true;
292 }
293
294 static int dm_wait_for_idle(void *handle)
295 {
296         /* XXX todo */
297         return 0;
298 }
299
300 static bool dm_check_soft_reset(void *handle)
301 {
302         return false;
303 }
304
305 static int dm_soft_reset(void *handle)
306 {
307         /* XXX todo */
308         return 0;
309 }
310
311 static struct amdgpu_crtc *
312 get_crtc_by_otg_inst(struct amdgpu_device *adev,
313                      int otg_inst)
314 {
315         struct drm_device *dev = adev_to_drm(adev);
316         struct drm_crtc *crtc;
317         struct amdgpu_crtc *amdgpu_crtc;
318
319         if (WARN_ON(otg_inst == -1))
320                 return adev->mode_info.crtcs[0];
321
322         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
323                 amdgpu_crtc = to_amdgpu_crtc(crtc);
324
325                 if (amdgpu_crtc->otg_inst == otg_inst)
326                         return amdgpu_crtc;
327         }
328
329         return NULL;
330 }
331
332 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
333 {
334         return acrtc->dm_irq_params.freesync_config.state ==
335                        VRR_STATE_ACTIVE_VARIABLE ||
336                acrtc->dm_irq_params.freesync_config.state ==
337                        VRR_STATE_ACTIVE_FIXED;
338 }
339
340 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
341 {
342         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
343                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
344 }
345
346 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
347                                               struct dm_crtc_state *new_state)
348 {
349         if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
350                 return true;
351         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
352                 return true;
353         else
354                 return false;
355 }
356
357 /**
358  * dm_pflip_high_irq() - Handle pageflip interrupt
359  * @interrupt_params: interrupt parameters; used to look up the flipping CRTC
360  *
361  * Handles the pageflip interrupt by notifying all interested parties
362  * that the pageflip has been completed.
363  */
364 static void dm_pflip_high_irq(void *interrupt_params)
365 {
366         struct amdgpu_crtc *amdgpu_crtc;
367         struct common_irq_params *irq_params = interrupt_params;
368         struct amdgpu_device *adev = irq_params->adev;
369         unsigned long flags;
370         struct drm_pending_vblank_event *e;
371         uint32_t vpos, hpos, v_blank_start, v_blank_end;
372         bool vrr_active;
373
374         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
375
376         /* IRQ could occur when in initial stage */
377         /* TODO work and BO cleanup */
378         if (amdgpu_crtc == NULL) {
379                 DC_LOG_PFLIP("CRTC is null, returning.\n");
380                 return;
381         }
382
383         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
384
385         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
386                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
387                                                  amdgpu_crtc->pflip_status,
388                                                  AMDGPU_FLIP_SUBMITTED,
389                                                  amdgpu_crtc->crtc_id,
390                                                  amdgpu_crtc);
391                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
392                 return;
393         }
394
395         /* page flip completed. */
396         e = amdgpu_crtc->event;
397         amdgpu_crtc->event = NULL;
398
399         WARN_ON(!e);
400
401         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
402
403         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
404         if (!vrr_active ||
405             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
406                                       &v_blank_end, &hpos, &vpos) ||
407             (vpos < v_blank_start)) {
408                 /* Update to correct count and vblank timestamp if racing with
409                  * vblank irq. This also updates to the correct vblank timestamp
410                  * even in VRR mode, as scanout is past the front-porch atm.
411                  */
412                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
413
414                 /* Wake up userspace by sending the pageflip event with proper
415                  * count and timestamp of vblank of flip completion.
416                  */
417                 if (e) {
418                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
419
420                         /* Event sent, so done with vblank for this flip */
421                         drm_crtc_vblank_put(&amdgpu_crtc->base);
422                 }
423         } else if (e) {
424                 /* VRR active and inside front-porch: vblank count and
425                  * timestamp for pageflip event will only be up to date after
426                  * drm_crtc_handle_vblank() has been executed from late vblank
427                  * irq handler after start of back-porch (vline 0). We queue the
428                  * pageflip event for send-out by drm_crtc_handle_vblank() with
429                  * updated timestamp and count, once it runs after us.
430                  *
431                  * We need to open-code this instead of using the helper
432                  * drm_crtc_arm_vblank_event(), as that helper would
433                  * call drm_crtc_accurate_vblank_count(), which we must
434                  * not call in VRR mode while we are in front-porch!
435                  */
436
437                 /* sequence will be replaced by real count during send-out. */
438                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
439                 e->pipe = amdgpu_crtc->crtc_id;
440
441                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
442                 e = NULL;
443         }
444
445         /* Keep track of vblank of this flip for flip throttling. We use the
446          * cooked hw counter, as that one incremented at start of this vblank
447          * of pageflip completion, so last_flip_vblank is the forbidden count
448          * for queueing new pageflips if vsync + VRR is enabled.
449          */
450         amdgpu_crtc->dm_irq_params.last_flip_vblank =
451                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
452
453         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
454         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
455
456         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
457                      amdgpu_crtc->crtc_id, amdgpu_crtc,
458                      vrr_active, (int) !e);
459 }
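
/*
 * For contrast with the open-coded event queueing above: outside of the VRR
 * front-porch case, the generic DRM helper arms a pageflip event in one call.
 * Sketch only; as the comment in dm_pflip_high_irq() explains, the helper
 * cannot be used there because it samples the vblank count, which must not
 * happen while inside the VRR front-porch.
 */
#if 0
static void dm_example_arm_flip_event(struct drm_crtc *crtc,
				      struct drm_pending_vblank_event *e)
{
	/* Caller must hold dev->event_lock, as dm_pflip_high_irq() does. */
	drm_crtc_arm_vblank_event(crtc, e);
}
#endif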
460
461 static void dm_vupdate_high_irq(void *interrupt_params)
462 {
463         struct common_irq_params *irq_params = interrupt_params;
464         struct amdgpu_device *adev = irq_params->adev;
465         struct amdgpu_crtc *acrtc;
466         struct drm_device *drm_dev;
467         struct drm_vblank_crtc *vblank;
468         ktime_t frame_duration_ns, previous_timestamp;
469         unsigned long flags;
470         int vrr_active;
471
472         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
473
474         if (acrtc) {
475                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
476                 drm_dev = acrtc->base.dev;
477                 vblank = &drm_dev->vblank[acrtc->base.index];
478                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
479                 frame_duration_ns = vblank->time - previous_timestamp;
480
481                 if (frame_duration_ns > 0) {
482                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
483                                                 frame_duration_ns,
484                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
485                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
486                 }
487
488                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
489                               acrtc->crtc_id,
490                               vrr_active);
491
492                 /* Core vblank handling is done here after end of front-porch in
493                  * vrr mode, as vblank timestamping will give valid results
494                  * while now done after front-porch. This will also deliver
495                  * page-flip completion events that have been queued to us
496                  * if a pageflip happened inside front-porch.
497                  */
498                 if (vrr_active) {
499                         drm_crtc_handle_vblank(&acrtc->base);
500
501                         /* BTR processing for pre-DCE12 ASICs */
502                         if (acrtc->dm_irq_params.stream &&
503                             adev->family < AMDGPU_FAMILY_AI) {
504                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
505                                 mod_freesync_handle_v_update(
506                                     adev->dm.freesync_module,
507                                     acrtc->dm_irq_params.stream,
508                                     &acrtc->dm_irq_params.vrr_params);
509
510                                 dc_stream_adjust_vmin_vmax(
511                                     adev->dm.dc,
512                                     acrtc->dm_irq_params.stream,
513                                     &acrtc->dm_irq_params.vrr_params.adjust);
514                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
515                         }
516                 }
517         }
518 }
519
520 /**
521  * dm_crtc_high_irq() - Handles CRTC interrupt
522  * @interrupt_params: used for determining the CRTC instance
523  *
524  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
525  * event handler.
526  */
527 static void dm_crtc_high_irq(void *interrupt_params)
528 {
529         struct common_irq_params *irq_params = interrupt_params;
530         struct amdgpu_device *adev = irq_params->adev;
531         struct amdgpu_crtc *acrtc;
532         unsigned long flags;
533         int vrr_active;
534
535         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
536         if (!acrtc)
537                 return;
538
539         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
540
541         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
542                       vrr_active, acrtc->dm_irq_params.active_planes);
543
544         /*
545          * Core vblank handling at start of front-porch is only possible
546          * in non-vrr mode, as only there vblank timestamping will give
547          * valid results while done in front-porch. Otherwise defer it
548          * to dm_vupdate_high_irq after end of front-porch.
549          */
550         if (!vrr_active)
551                 drm_crtc_handle_vblank(&acrtc->base);
552
553         /*
554          * Following stuff must happen at start of vblank, for crc
555          * computation and below-the-range btr support in vrr mode.
556          */
557         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
558
559         /* BTR updates need to happen before VUPDATE on Vega and above. */
560         if (adev->family < AMDGPU_FAMILY_AI)
561                 return;
562
563         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
564
565         if (acrtc->dm_irq_params.stream &&
566             acrtc->dm_irq_params.vrr_params.supported &&
567             acrtc->dm_irq_params.freesync_config.state ==
568                     VRR_STATE_ACTIVE_VARIABLE) {
569                 mod_freesync_handle_v_update(adev->dm.freesync_module,
570                                              acrtc->dm_irq_params.stream,
571                                              &acrtc->dm_irq_params.vrr_params);
572
573                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
574                                            &acrtc->dm_irq_params.vrr_params.adjust);
575         }
576
577         /*
578          * If there aren't any active_planes then DCH HUBP may be clock-gated.
579          * In that case, pageflip completion interrupts won't fire and pageflip
580          * completion events won't get delivered. Prevent this by sending
581          * pending pageflip events from here if a flip is still pending.
582          *
583          * If any planes are enabled, use dm_pflip_high_irq() instead, to
584          * avoid race conditions between flip programming and completion,
585          * which could cause too early flip completion events.
586          */
587         if (adev->family >= AMDGPU_FAMILY_RV &&
588             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
589             acrtc->dm_irq_params.active_planes == 0) {
590                 if (acrtc->event) {
591                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
592                         acrtc->event = NULL;
593                         drm_crtc_vblank_put(&acrtc->base);
594                 }
595                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
596         }
597
598         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
599 }
600
601 #if defined(CONFIG_DRM_AMD_DC_DCN)
602 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
603 /**
604  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
605  * DCN generation ASICs
606  * @interrupt_params: interrupt parameters
607  *
608  * Used to set crc window/read out crc value at vertical line 0 position
609  */
610 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
611 {
612         struct common_irq_params *irq_params = interrupt_params;
613         struct amdgpu_device *adev = irq_params->adev;
614         struct amdgpu_crtc *acrtc;
615
616         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
617
618         if (!acrtc)
619                 return;
620
621         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
622 }
623 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
624
625 /**
626  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
627  * @adev: amdgpu_device pointer
628  * @notify: dmub notification structure
629  *
630  * Dmub AUX or SET_CONFIG command completion processing callback.
631  * Copies the dmub notification to DM, where it is read by the AUX-command-
632  * issuing thread, and signals the event to wake up that thread.
633  */
634 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
635 {
636         if (adev->dm.dmub_notify)
637                 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
638         if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
639                 complete(&adev->dm.dmub_aux_transfer_done);
640 }
641
642 /**
643  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
644  * @adev: amdgpu_device pointer
645  * @notify: dmub notification structure
646  *
647  * Dmub Hpd interrupt processing callback. Gets the display index through
648  * the link index and calls a helper to do the processing.
649  */
650 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
651 {
652         struct amdgpu_dm_connector *aconnector;
653         struct amdgpu_dm_connector *hpd_aconnector = NULL;
654         struct drm_connector *connector;
655         struct drm_connector_list_iter iter;
656         struct dc_link *link;
657         uint8_t link_index = 0;
658         struct drm_device *dev = adev->dm.ddev;
659
660         if (adev == NULL)
661                 return;
662
663         if (notify == NULL) {
664                 DRM_ERROR("DMUB HPD callback notification was NULL");
665                 return;
666         }
667
668         if (notify->link_index >= adev->dm.dc->link_count) {
669                 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
670                 return;
671         }
672
673         link_index = notify->link_index;
674         link = adev->dm.dc->links[link_index];
675
676         drm_connector_list_iter_begin(dev, &iter);
677         drm_for_each_connector_iter(connector, &iter) {
678                 aconnector = to_amdgpu_dm_connector(connector);
679                 if (link && aconnector->dc_link == link) {
680                         DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
681                         hpd_aconnector = aconnector;
682                         break;
683                 }
684         }
685         drm_connector_list_iter_end(&iter);
686
687         if (hpd_aconnector) {
688                 if (notify->type == DMUB_NOTIFICATION_HPD)
689                         handle_hpd_irq_helper(hpd_aconnector);
690                 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
691                         handle_hpd_rx_irq(hpd_aconnector);
692         }
693 }
694
695 /**
696  * register_dmub_notify_callback - Sets callback for DMUB notify
697  * @adev: amdgpu_device pointer
698  * @type: Type of dmub notification
699  * @callback: Dmub interrupt callback function
700  * @dmub_int_thread_offload: offload indicator
701  *
702  * API to register a dmub callback handler for a dmub notification. Also
703  * sets an indicator for whether callback processing is to be offloaded
704  * to the dmub interrupt handling thread.
705  * Return: true if successfully registered, false otherwise
706  */
707 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
708                                    dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
709 {
710         if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
711                 adev->dm.dmub_callback[type] = callback;
712                 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
713         } else
714                 return false;
715
716         return true;
717 }
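
/*
 * A minimal usage sketch, assuming a DMUB-notification-capable ASIC: during
 * init the driver would register the HPD and AUX handlers defined above,
 * offloading HPD processing to the dmub interrupt handling thread. The
 * wrapper name is hypothetical; the registration calls use the API above.
 */
#if 0
static void dm_example_register_dmub_callbacks(struct amdgpu_device *adev)
{
	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
					   dmub_hpd_callback, true))
		DRM_ERROR("amdgpu: failed to register DMUB HPD callback");

	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
					   dmub_aux_setconfig_callback, false))
		DRM_ERROR("amdgpu: failed to register DMUB AUX callback");
}
#endif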
718
719 static void dm_handle_hpd_work(struct work_struct *work)
720 {
721         struct dmub_hpd_work *dmub_hpd_wrk;
722
723         dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
724
725         if (!dmub_hpd_wrk->dmub_notify) {
726                 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
727                 return;
728         }
729
730         if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
731                 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
732                 dmub_hpd_wrk->dmub_notify);
733         }
734
735         kfree(dmub_hpd_wrk->dmub_notify);
736         kfree(dmub_hpd_wrk);
737
738 }
739
740 #define DMUB_TRACE_MAX_READ 64
741 /**
742  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
743  * @interrupt_params: used for determining the Outbox instance
744  *
745  * Handles the Outbox interrupt by draining pending DMUB notifications
746  * and DMCUB trace-buffer entries.
747  */
748 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
749 {
750         struct dmub_notification notify;
751         struct common_irq_params *irq_params = interrupt_params;
752         struct amdgpu_device *adev = irq_params->adev;
753         struct amdgpu_display_manager *dm = &adev->dm;
754         struct dmcub_trace_buf_entry entry = { 0 };
755         uint32_t count = 0;
756         struct dmub_hpd_work *dmub_hpd_wrk;
757         struct dc_link *plink = NULL;
758
759         if (dc_enable_dmub_notifications(adev->dm.dc) &&
760                 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
761
762                 do {
763                         dc_stat_get_dmub_notification(adev->dm.dc, &notify);
764                         if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
765                                 DRM_ERROR("DM: notify type %d invalid!", notify.type);
766                                 continue;
767                         }
768                         if (!dm->dmub_callback[notify.type]) {
769                                 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
770                                 continue;
771                         }
772                         if (dm->dmub_thread_offload[notify.type] == true) {
773                                 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
774                                 if (!dmub_hpd_wrk) {
775                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk");
776                                         return;
777                                 }
778                                 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
779                                 if (!dmub_hpd_wrk->dmub_notify) {
780                                         kfree(dmub_hpd_wrk);
781                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
782                                         return;
783                                 }
784                                 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
785                                 memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
787                                 dmub_hpd_wrk->adev = adev;
788                                 if (notify.type == DMUB_NOTIFICATION_HPD) {
789                                         plink = adev->dm.dc->links[notify.link_index];
790                                         if (plink) {
791                                                 plink->hpd_status =
792                                                         notify.hpd_status == DP_HPD_PLUG;
794                                         }
795                                 }
796                                 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
797                         } else {
798                                 dm->dmub_callback[notify.type](adev, &notify);
799                         }
800                 } while (notify.pending_notification);
801         }
802
803
804         do {
805                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
806                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
807                                                         entry.param0, entry.param1);
808
809                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
810                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
811                 } else
812                         break;
813
814                 count++;
815
816         } while (count <= DMUB_TRACE_MAX_READ);
817
818         if (count > DMUB_TRACE_MAX_READ)
819                 DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
820 }
821 #endif /* CONFIG_DRM_AMD_DC_DCN */
822
823 static int dm_set_clockgating_state(void *handle,
824                   enum amd_clockgating_state state)
825 {
826         return 0;
827 }
828
829 static int dm_set_powergating_state(void *handle,
830                   enum amd_powergating_state state)
831 {
832         return 0;
833 }
834
835 /* Prototypes of private functions */
836 static int dm_early_init(void *handle);
837
838 /* Allocate memory for FBC compressed data */
839 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
840 {
841         struct drm_device *dev = connector->dev;
842         struct amdgpu_device *adev = drm_to_adev(dev);
843         struct dm_compressor_info *compressor = &adev->dm.compressor;
844         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
845         struct drm_display_mode *mode;
846         unsigned long max_size = 0;
847
848         if (adev->dm.dc->fbc_compressor == NULL)
849                 return;
850
851         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
852                 return;
853
854         if (compressor->bo_ptr)
855                 return;
856
857
858         list_for_each_entry(mode, &connector->modes, head) {
859                 if (max_size < mode->htotal * mode->vtotal)
860                         max_size = mode->htotal * mode->vtotal;
861         }
862
863         if (max_size) {
864                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
865                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
866                             &compressor->gpu_addr, &compressor->cpu_addr);
867
868                 if (r)
869                         DRM_ERROR("DM: Failed to initialize FBC\n");
870                 else {
871                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
872                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
873                 }
874
875         }
876
877 }
878
879 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
880                                           int pipe, bool *enabled,
881                                           unsigned char *buf, int max_bytes)
882 {
883         struct drm_device *dev = dev_get_drvdata(kdev);
884         struct amdgpu_device *adev = drm_to_adev(dev);
885         struct drm_connector *connector;
886         struct drm_connector_list_iter conn_iter;
887         struct amdgpu_dm_connector *aconnector;
888         int ret = 0;
889
890         *enabled = false;
891
892         mutex_lock(&adev->dm.audio_lock);
893
894         drm_connector_list_iter_begin(dev, &conn_iter);
895         drm_for_each_connector_iter(connector, &conn_iter) {
896                 aconnector = to_amdgpu_dm_connector(connector);
897                 if (aconnector->audio_inst != port)
898                         continue;
899
900                 *enabled = true;
901                 ret = drm_eld_size(connector->eld);
902                 memcpy(buf, connector->eld, min(max_bytes, ret));
903
904                 break;
905         }
906         drm_connector_list_iter_end(&conn_iter);
907
908         mutex_unlock(&adev->dm.audio_lock);
909
910         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
911
912         return ret;
913 }
914
915 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
916         .get_eld = amdgpu_dm_audio_component_get_eld,
917 };
918
919 static int amdgpu_dm_audio_component_bind(struct device *kdev,
920                                        struct device *hda_kdev, void *data)
921 {
922         struct drm_device *dev = dev_get_drvdata(kdev);
923         struct amdgpu_device *adev = drm_to_adev(dev);
924         struct drm_audio_component *acomp = data;
925
926         acomp->ops = &amdgpu_dm_audio_component_ops;
927         acomp->dev = kdev;
928         adev->dm.audio_component = acomp;
929
930         return 0;
931 }
932
933 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
934                                           struct device *hda_kdev, void *data)
935 {
936         struct drm_device *dev = dev_get_drvdata(kdev);
937         struct amdgpu_device *adev = drm_to_adev(dev);
938         struct drm_audio_component *acomp = data;
939
940         acomp->ops = NULL;
941         acomp->dev = NULL;
942         adev->dm.audio_component = NULL;
943 }
944
945 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
946         .bind   = amdgpu_dm_audio_component_bind,
947         .unbind = amdgpu_dm_audio_component_unbind,
948 };
949
950 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
951 {
952         int i, ret;
953
954         if (!amdgpu_audio)
955                 return 0;
956
957         adev->mode_info.audio.enabled = true;
958
959         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
960
961         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
962                 adev->mode_info.audio.pin[i].channels = -1;
963                 adev->mode_info.audio.pin[i].rate = -1;
964                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
965                 adev->mode_info.audio.pin[i].status_bits = 0;
966                 adev->mode_info.audio.pin[i].category_code = 0;
967                 adev->mode_info.audio.pin[i].connected = false;
968                 adev->mode_info.audio.pin[i].id =
969                         adev->dm.dc->res_pool->audios[i]->inst;
970                 adev->mode_info.audio.pin[i].offset = 0;
971         }
972
973         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
974         if (ret < 0)
975                 return ret;
976
977         adev->dm.audio_registered = true;
978
979         return 0;
980 }
981
982 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
983 {
984         if (!amdgpu_audio)
985                 return;
986
987         if (!adev->mode_info.audio.enabled)
988                 return;
989
990         if (adev->dm.audio_registered) {
991                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
992                 adev->dm.audio_registered = false;
993         }
994
995         /* TODO: Disable audio? */
996
997         adev->mode_info.audio.enabled = false;
998 }
999
1000 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1001 {
1002         struct drm_audio_component *acomp = adev->dm.audio_component;
1003
1004         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1005                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1006
1007                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1008                                                  pin, -1);
1009         }
1010 }
1011
1012 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1013 {
1014         const struct dmcub_firmware_header_v1_0 *hdr;
1015         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1016         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1017         const struct firmware *dmub_fw = adev->dm.dmub_fw;
1018         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1019         struct abm *abm = adev->dm.dc->res_pool->abm;
1020         struct dmub_srv_hw_params hw_params;
1021         enum dmub_status status;
1022         const unsigned char *fw_inst_const, *fw_bss_data;
1023         uint32_t i, fw_inst_const_size, fw_bss_data_size;
1024         bool has_hw_support;
1025         struct dc *dc = adev->dm.dc;
1026
1027         if (!dmub_srv)
1028                 /* DMUB isn't supported on the ASIC. */
1029                 return 0;
1030
1031         if (!fb_info) {
1032                 DRM_ERROR("No framebuffer info for DMUB service.\n");
1033                 return -EINVAL;
1034         }
1035
1036         if (!dmub_fw) {
1037                 /* Firmware required for DMUB support. */
1038                 DRM_ERROR("No firmware provided for DMUB.\n");
1039                 return -EINVAL;
1040         }
1041
1042         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1043         if (status != DMUB_STATUS_OK) {
1044                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1045                 return -EINVAL;
1046         }
1047
1048         if (!has_hw_support) {
1049                 DRM_INFO("DMUB unsupported on ASIC\n");
1050                 return 0;
1051         }
1052
1053         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1054
1055         fw_inst_const = dmub_fw->data +
1056                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1057                         PSP_HEADER_BYTES;
1058
1059         fw_bss_data = dmub_fw->data +
1060                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1061                       le32_to_cpu(hdr->inst_const_bytes);
1062
1063         /* Copy firmware and bios info into FB memory. */
1064         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1065                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1066
1067         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
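
	/*
	 * Blob layout implied by the offsets computed above, starting at
	 * ucode_array_offset_bytes (all sizes come from the DMCUB header):
	 *
	 *   [ PSP header (0x100) | inst const payload | PSP footer (0x100) ]
	 *   |<----------------- inst_const_bytes ------------------------>|
	 *   [ bss/data (bss_data_bytes) ]
	 */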
1068
1069         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1070          * amdgpu_ucode_init_single_fw will load dmub firmware
1071          * fw_inst_const part to cw0; otherwise, the firmware back door load
1072          * will be done by dm_dmub_hw_init
1073          */
1074         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1075                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1076                                 fw_inst_const_size);
1077         }
1078
1079         if (fw_bss_data_size)
1080                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1081                        fw_bss_data, fw_bss_data_size);
1082
1083         /* Copy firmware bios info into FB memory. */
1084         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1085                adev->bios_size);
1086
1087         /* Reset regions that need to be reset. */
1088         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1089         fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1090
1091         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1092                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1093
1094         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1095                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1096
1097         /* Initialize hardware. */
1098         memset(&hw_params, 0, sizeof(hw_params));
1099         hw_params.fb_base = adev->gmc.fb_start;
1100         hw_params.fb_offset = adev->gmc.aper_base;
1101
1102         /* backdoor load firmware and trigger dmub running */
1103         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1104                 hw_params.load_inst_const = true;
1105
1106         if (dmcu)
1107                 hw_params.psp_version = dmcu->psp_version;
1108
1109         for (i = 0; i < fb_info->num_fb; ++i)
1110                 hw_params.fb[i] = &fb_info->fb[i];
1111
1112         switch (adev->asic_type) {
1113         case CHIP_YELLOW_CARP:
1114                 if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1115                         hw_params.dpia_supported = true;
1116 #if defined(CONFIG_DRM_AMD_DC_DCN)
1117                         hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1118 #endif
1119                 }
1120                 break;
1121         default:
1122                 break;
1123         }
1124
1125         status = dmub_srv_hw_init(dmub_srv, &hw_params);
1126         if (status != DMUB_STATUS_OK) {
1127                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1128                 return -EINVAL;
1129         }
1130
1131         /* Wait for firmware load to finish. */
1132         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1133         if (status != DMUB_STATUS_OK)
1134                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1135
1136         /* Init DMCU and ABM if available. */
1137         if (dmcu && abm) {
1138                 dmcu->funcs->dmcu_init(dmcu);
1139                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1140         }
1141
1142         if (!adev->dm.dc->ctx->dmub_srv)
1143                 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1144         if (!adev->dm.dc->ctx->dmub_srv) {
1145                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1146                 return -ENOMEM;
1147         }
1148
1149         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1150                  adev->dm.dmcub_fw_version);
1151
1152         return 0;
1153 }
1154
1155 #if defined(CONFIG_DRM_AMD_DC_DCN)
1156 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1157 {
1158         uint64_t pt_base;
1159         uint32_t logical_addr_low;
1160         uint32_t logical_addr_high;
1161         uint32_t agp_base, agp_bot, agp_top;
1162         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1163
1164         memset(pa_config, 0, sizeof(*pa_config));
1165
1166         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1167         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1168
1169         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1170                 /*
1171                  * Raven2 has a HW issue that prevents it from using VRAM beyond
1172                  * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
1173                  * system aperture high address (by 1) to get rid of the VM fault
1174                  * and hardware hang.
1175                  */
1176                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1177         else
1178                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1179
1180         agp_base = 0;
1181         agp_bot = adev->gmc.agp_start >> 24;
1182         agp_top = adev->gmc.agp_end >> 24;
1183
1184
1185         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1186         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1187         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1188         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1189         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1190         page_table_base.low_part = lower_32_bits(pt_base);
1191
1192         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1193         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1194
1195         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1196         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1197         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1198
1199         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1200         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1201         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1202
1203         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1204         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1205         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1206
1207         pa_config->is_hvm_enabled = 0;
1208
1209 }
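
/*
 * A worked example of the shift arithmetic above (values illustrative only):
 * the system-aperture fields are kept in 256 KiB units (>> 18), the AGP
 * window in 16 MiB units (>> 24), and the GART page-table addresses in
 * 4 KiB pages (>> 12). For instance, with fb_start = 0x0000008000000000,
 * logical_addr_low = fb_start >> 18 = 0x200000, and shifting back,
 * (uint64_t)0x200000 << 18, recovers the original byte address.
 */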
1210 #endif
1211 #if defined(CONFIG_DRM_AMD_DC_DCN)
1212 static void vblank_control_worker(struct work_struct *work)
1213 {
1214         struct vblank_control_work *vblank_work =
1215                 container_of(work, struct vblank_control_work, work);
1216         struct amdgpu_display_manager *dm = vblank_work->dm;
1217
1218         mutex_lock(&dm->dc_lock);
1219
1220         if (vblank_work->enable)
1221                 dm->active_vblank_irq_count++;
1222         else if (dm->active_vblank_irq_count)
1223                 dm->active_vblank_irq_count--;
1224
1225         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1226
1227         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1228
1229         /* Control PSR based on vblank requirements from OS */
1230         if (vblank_work->stream && vblank_work->stream->link) {
1231                 if (vblank_work->enable) {
1232                         if (vblank_work->stream->link->psr_settings.psr_allow_active)
1233                                 amdgpu_dm_psr_disable(vblank_work->stream);
1234                 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1235                            !vblank_work->stream->link->psr_settings.psr_allow_active &&
1236                            vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1237                         amdgpu_dm_psr_enable(vblank_work->stream);
1238                 }
1239         }
1240
1241         mutex_unlock(&dm->dc_lock);
1242
1243         dc_stream_release(vblank_work->stream);
1244
1245         kfree(vblank_work);
1246 }
1247
1248 #endif
1249
1250 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1251 {
1252         struct hpd_rx_irq_offload_work *offload_work;
1253         struct amdgpu_dm_connector *aconnector;
1254         struct dc_link *dc_link;
1255         struct amdgpu_device *adev;
1256         enum dc_connection_type new_connection_type = dc_connection_none;
1257         unsigned long flags;
1258
1259         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1260         aconnector = offload_work->offload_wq->aconnector;
1261
1262         if (!aconnector) {
1263                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1264                 goto skip;
1265         }
1266
1267         adev = drm_to_adev(aconnector->base.dev);
1268         dc_link = aconnector->dc_link;
1269
1270         mutex_lock(&aconnector->hpd_lock);
1271         if (!dc_link_detect_sink(dc_link, &new_connection_type))
1272                 DRM_ERROR("KMS: Failed to detect connector\n");
1273         mutex_unlock(&aconnector->hpd_lock);
1274
1275         if (new_connection_type == dc_connection_none)
1276                 goto skip;
1277
1278         if (amdgpu_in_reset(adev))
1279                 goto skip;
1280
1281         mutex_lock(&adev->dm.dc_lock);
1282         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1283                 dc_link_dp_handle_automated_test(dc_link);
1284         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1285                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1286                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1287                 dc_link_dp_handle_link_loss(dc_link);
1288                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1289                 offload_work->offload_wq->is_handling_link_loss = false;
1290                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1291         }
1292         mutex_unlock(&adev->dm.dc_lock);
1293
1294 skip:
1295         kfree(offload_work);
1296
1297 }
1298
1299 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1300 {
1301         int max_caps = dc->caps.max_links;
1302         int i = 0;
1303         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1304
1305         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1306
1307         if (!hpd_rx_offload_wq)
1308                 return NULL;
1309
1310
1311         for (i = 0; i < max_caps; i++) {
1312                 hpd_rx_offload_wq[i].wq =
1313                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1314
1315                 if (hpd_rx_offload_wq[i].wq == NULL) {
1316                         DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1317                         /* Unwind workqueues created so far to avoid leaking them. */
1318                         while (--i >= 0)
1319                                 destroy_workqueue(hpd_rx_offload_wq[i].wq);
1320                         kfree(hpd_rx_offload_wq);
1321                         return NULL;
1322                 }
1319
1320                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1321         }
1322
1323         return hpd_rx_offload_wq;
1324 }
1325
1326 struct amdgpu_stutter_quirk {
1327         u16 chip_vendor;
1328         u16 chip_device;
1329         u16 subsys_vendor;
1330         u16 subsys_device;
1331         u8 revision;
1332 };
1333
1334 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1335         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1336         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1337         { 0, 0, 0, 0, 0 },
1338 };
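
/*
 * Sketch of how a new quirk entry would look (values below are hypothetical):
 * match on PCI vendor/device, subsystem vendor/device, and revision, and keep
 * the zero-filled terminator as the last entry, e.g.
 *
 *	{ 0x1002, 0x15dd, 0x103c, 0x8615, 0xc4 },
 */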
1339
1340 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1341 {
1342         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1343
1344         while (p && p->chip_device != 0) {
1345                 if (pdev->vendor == p->chip_vendor &&
1346                     pdev->device == p->chip_device &&
1347                     pdev->subsystem_vendor == p->subsys_vendor &&
1348                     pdev->subsystem_device == p->subsys_device &&
1349                     pdev->revision == p->revision) {
1350                         return true;
1351                 }
1352                 ++p;
1353         }
1354         return false;
1355 }
1356
1357 static int amdgpu_dm_init(struct amdgpu_device *adev)
1358 {
1359         struct dc_init_data init_data;
1360 #ifdef CONFIG_DRM_AMD_DC_HDCP
1361         struct dc_callback_init init_params;
1362 #endif
1363         int r;
1364
1365         adev->dm.ddev = adev_to_drm(adev);
1366         adev->dm.adev = adev;
1367
1368         /* Zero all the fields */
1369         memset(&init_data, 0, sizeof(init_data));
1370 #ifdef CONFIG_DRM_AMD_DC_HDCP
1371         memset(&init_params, 0, sizeof(init_params));
1372 #endif
1373
1374         mutex_init(&adev->dm.dc_lock);
1375         mutex_init(&adev->dm.audio_lock);
1376 #if defined(CONFIG_DRM_AMD_DC_DCN)
1377         spin_lock_init(&adev->dm.vblank_lock);
1378 #endif
1379
1380         if (amdgpu_dm_irq_init(adev)) {
1381                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1382                 goto error;
1383         }
1384
1385         init_data.asic_id.chip_family = adev->family;
1386
1387         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1388         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1389         init_data.asic_id.chip_id = adev->pdev->device;
1390
1391         init_data.asic_id.vram_width = adev->gmc.vram_width;
1392         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1393         init_data.asic_id.atombios_base_address =
1394                 adev->mode_info.atom_context->bios;
1395
1396         init_data.driver = adev;
1397
1398         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1399
1400         if (!adev->dm.cgs_device) {
1401                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1402                 goto error;
1403         }
1404
1405         init_data.cgs_device = adev->dm.cgs_device;
1406
1407         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1408
1409         switch (adev->asic_type) {
1410         case CHIP_CARRIZO:
1411         case CHIP_STONEY:
1412                 init_data.flags.gpu_vm_support = true;
1413                 break;
1414         default:
1415                 switch (adev->ip_versions[DCE_HWIP][0]) {
1416                 case IP_VERSION(2, 1, 0):
1417                         init_data.flags.gpu_vm_support = true;
1418                         switch (adev->dm.dmcub_fw_version) {
1419                         case 0: /* development */
1420                         case 0x1: /* linux-firmware.git hash 6d9f399 */
1421                         case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1422                                 init_data.flags.disable_dmcu = false;
1423                                 break;
1424                         default:
1425                                 init_data.flags.disable_dmcu = true;
1426                         }
1427                         break;
1428                 case IP_VERSION(1, 0, 0):
1429                 case IP_VERSION(1, 0, 1):
1430                 case IP_VERSION(3, 0, 1):
1431                 case IP_VERSION(3, 1, 2):
1432                 case IP_VERSION(3, 1, 3):
1433                         init_data.flags.gpu_vm_support = true;
1434                         break;
1435                 case IP_VERSION(2, 0, 3):
1436                         init_data.flags.disable_dmcu = true;
1437                         break;
1438                 default:
1439                         break;
1440                 }
1441                 break;
1442         }
1443
1444         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1445                 init_data.flags.fbc_support = true;
1446
1447         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1448                 init_data.flags.multi_mon_pp_mclk_switch = true;
1449
1450         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1451                 init_data.flags.disable_fractional_pwm = true;
1452
1453         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1454                 init_data.flags.edp_no_power_sequencing = true;
1455
1456         init_data.flags.power_down_display_on_boot = true;
1457
1458         INIT_LIST_HEAD(&adev->dm.da_list);
1459         /* Display Core create. */
1460         adev->dm.dc = dc_create(&init_data);
1461
1462         if (adev->dm.dc) {
1463                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1464         } else {
1465                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1466                 goto error;
1467         }
1468
1469         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1470                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1471                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1472         }
1473
1474         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1475                 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1476         if (dm_should_disable_stutter(adev->pdev))
1477                 adev->dm.dc->debug.disable_stutter = true;
1478
1479         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1480                 adev->dm.dc->debug.disable_stutter = true;
1481
1482         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1483                 adev->dm.dc->debug.disable_dsc = true;
1484
1485         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1486                 adev->dm.dc->debug.disable_clock_gate = true;
1487
1488         r = dm_dmub_hw_init(adev);
1489         if (r) {
1490                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1491                 goto error;
1492         }
1493
1494         dc_hardware_init(adev->dm.dc);
1495
1496         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1497         if (!adev->dm.hpd_rx_offload_wq) {
1498                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1499                 goto error;
1500         }
1501
1502 #if defined(CONFIG_DRM_AMD_DC_DCN)
1503         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1504                 struct dc_phy_addr_space_config pa_config;
1505
1506                 mmhub_read_system_context(adev, &pa_config);
1507
1508                 // Call the DC init_memory func
1509                 dc_setup_system_context(adev->dm.dc, &pa_config);
1510         }
1511 #endif
1512
1513         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1514         if (!adev->dm.freesync_module) {
1515                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1516         } else {
1517                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1518                                  adev->dm.freesync_module);
1519         }
1520
1521         amdgpu_dm_init_color_mod();
1522
1523 #if defined(CONFIG_DRM_AMD_DC_DCN)
1524         if (adev->dm.dc->caps.max_links > 0) {
1525                 adev->dm.vblank_control_workqueue =
1526                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1527                 if (!adev->dm.vblank_control_workqueue)
1528                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1529         }
1530 #endif
1531
1532 #ifdef CONFIG_DRM_AMD_DC_HDCP
1533         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1534                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1535
1536                 if (!adev->dm.hdcp_workqueue)
1537                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1538                 else
1539                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1540
1541                 dc_init_callbacks(adev->dm.dc, &init_params);
1542         }
1543 #endif
1544 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1545         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1546 #endif
1547         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1548                 init_completion(&adev->dm.dmub_aux_transfer_done);
1549                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1550                 if (!adev->dm.dmub_notify) {
1551                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
1552                         goto error;
1553                 }
1554
1555                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1556                 if (!adev->dm.delayed_hpd_wq) {
1557                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1558                         goto error;
1559                 }
1560
1561                 amdgpu_dm_outbox_init(adev);
1562 #if defined(CONFIG_DRM_AMD_DC_DCN)
1563                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1564                         dmub_aux_setconfig_callback, false)) {
1565                         DRM_ERROR("amdgpu: failed to register dmub aux callback");
1566                         goto error;
1567                 }
1568                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1569                         DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1570                         goto error;
1571                 }
1572                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1573                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback");
1574                         goto error;
1575                 }
1576 #endif /* CONFIG_DRM_AMD_DC_DCN */
1577         }
1578
1579         if (amdgpu_dm_initialize_drm_device(adev)) {
1580                 DRM_ERROR(
1581                 "amdgpu: failed to initialize sw for display support.\n");
1582                 goto error;
1583         }
1584
1585         /* create fake encoders for MST */
1586         dm_dp_create_fake_mst_encoders(adev);
1587
1588         /* TODO: Add_display_info? */
1589
1590         /* TODO use dynamic cursor width */
1591         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1592         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1593
1594         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1595                 DRM_ERROR(
1596                 "amdgpu: failed to initialize vblank for display support.\n");
1597                 goto error;
1598         }
1599
1600
1601         DRM_DEBUG_DRIVER("KMS initialized.\n");
1602
1603         return 0;
1604 error:
1605         amdgpu_dm_fini(adev);
1606
1607         return -EINVAL;
1608 }
1609
1610 static int amdgpu_dm_early_fini(void *handle)
1611 {
1612         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1613
1614         amdgpu_dm_audio_fini(adev);
1615
1616         return 0;
1617 }
1618
1619 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1620 {
1621         int i;
1622
1623 #if defined(CONFIG_DRM_AMD_DC_DCN)
1624         if (adev->dm.vblank_control_workqueue) {
1625                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1626                 adev->dm.vblank_control_workqueue = NULL;
1627         }
1628 #endif
1629
1630         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1631                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1632         }
1633
1634         amdgpu_dm_destroy_drm_device(&adev->dm);
1635
1636 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1637         if (adev->dm.crc_rd_wrk) {
1638                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1639                 kfree(adev->dm.crc_rd_wrk);
1640                 adev->dm.crc_rd_wrk = NULL;
1641         }
1642 #endif
1643 #ifdef CONFIG_DRM_AMD_DC_HDCP
1644         if (adev->dm.hdcp_workqueue) {
1645                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1646                 adev->dm.hdcp_workqueue = NULL;
1647         }
1648
1649         if (adev->dm.dc)
1650                 dc_deinit_callbacks(adev->dm.dc);
1651 #endif
1652
1653         dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1654
1655         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1656                 kfree(adev->dm.dmub_notify);
1657                 adev->dm.dmub_notify = NULL;
1658                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1659                 adev->dm.delayed_hpd_wq = NULL;
1660         }
1661
1662         if (adev->dm.dmub_bo)
1663                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1664                                       &adev->dm.dmub_bo_gpu_addr,
1665                                       &adev->dm.dmub_bo_cpu_addr);
1666
1667         if (adev->dm.hpd_rx_offload_wq) {
1668                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1669                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1670                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1671                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1672                         }
1673                 }
1674
1675                 kfree(adev->dm.hpd_rx_offload_wq);
1676                 adev->dm.hpd_rx_offload_wq = NULL;
1677         }
1678
1679         /* DC Destroy TODO: Replace destroy DAL */
1680         if (adev->dm.dc)
1681                 dc_destroy(&adev->dm.dc);
1682         /*
1683          * TODO: pageflip, vblank interrupt
1684          *
1685          * amdgpu_dm_irq_fini(adev);
1686          */
1687
1688         if (adev->dm.cgs_device) {
1689                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1690                 adev->dm.cgs_device = NULL;
1691         }
1692         if (adev->dm.freesync_module) {
1693                 mod_freesync_destroy(adev->dm.freesync_module);
1694                 adev->dm.freesync_module = NULL;
1695         }
1696
1697         mutex_destroy(&adev->dm.audio_lock);
1698         mutex_destroy(&adev->dm.dc_lock);
1701 }
1702
1703 static int load_dmcu_fw(struct amdgpu_device *adev)
1704 {
1705         const char *fw_name_dmcu = NULL;
1706         int r;
1707         const struct dmcu_firmware_header_v1_0 *hdr;
1708
1709         switch (adev->asic_type) {
1710 #if defined(CONFIG_DRM_AMD_DC_SI)
1711         case CHIP_TAHITI:
1712         case CHIP_PITCAIRN:
1713         case CHIP_VERDE:
1714         case CHIP_OLAND:
1715 #endif
1716         case CHIP_BONAIRE:
1717         case CHIP_HAWAII:
1718         case CHIP_KAVERI:
1719         case CHIP_KABINI:
1720         case CHIP_MULLINS:
1721         case CHIP_TONGA:
1722         case CHIP_FIJI:
1723         case CHIP_CARRIZO:
1724         case CHIP_STONEY:
1725         case CHIP_POLARIS11:
1726         case CHIP_POLARIS10:
1727         case CHIP_POLARIS12:
1728         case CHIP_VEGAM:
1729         case CHIP_VEGA10:
1730         case CHIP_VEGA12:
1731         case CHIP_VEGA20:
1732                 return 0;
1733         case CHIP_NAVI12:
1734                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1735                 break;
1736         case CHIP_RAVEN:
1737                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1738                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1739                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1740                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1741                 else
1742                         return 0;
1743                 break;
1744         default:
1745                 switch (adev->ip_versions[DCE_HWIP][0]) {
1746                 case IP_VERSION(2, 0, 2):
1747                 case IP_VERSION(2, 0, 3):
1748                 case IP_VERSION(2, 0, 0):
1749                 case IP_VERSION(2, 1, 0):
1750                 case IP_VERSION(3, 0, 0):
1751                 case IP_VERSION(3, 0, 2):
1752                 case IP_VERSION(3, 0, 3):
1753                 case IP_VERSION(3, 0, 1):
1754                 case IP_VERSION(3, 1, 2):
1755                 case IP_VERSION(3, 1, 3):
1756                         return 0;
1757                 default:
1758                         break;
1759                 }
1760                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1761                 return -EINVAL;
1762         }
1763
1764         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1765                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1766                 return 0;
1767         }
1768
1769         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1770         if (r == -ENOENT) {
1771                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1772                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1773                 adev->dm.fw_dmcu = NULL;
1774                 return 0;
1775         }
1776         if (r) {
1777                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1778                         fw_name_dmcu);
1779                 return r;
1780         }
1781
1782         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1783         if (r) {
1784                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1785                         fw_name_dmcu);
1786                 release_firmware(adev->dm.fw_dmcu);
1787                 adev->dm.fw_dmcu = NULL;
1788                 return r;
1789         }
1790
1791         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1792         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1793         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1794         adev->firmware.fw_size +=
1795                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1796
1797         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1798         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1799         adev->firmware.fw_size +=
1800                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1801
1802         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1803
1804         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1805
1806         return 0;
1807 }
1808
1809 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1810 {
1811         struct amdgpu_device *adev = ctx;
1812
1813         return dm_read_reg(adev->dm.dc->ctx, address);
1814 }
1815
1816 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1817                                      uint32_t value)
1818 {
1819         struct amdgpu_device *adev = ctx;
1820
1821         return dm_write_reg(adev->dm.dc->ctx, address, value);
1822 }
1823
1824 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1825 {
1826         struct dmub_srv_create_params create_params;
1827         struct dmub_srv_region_params region_params;
1828         struct dmub_srv_region_info region_info;
1829         struct dmub_srv_fb_params fb_params;
1830         struct dmub_srv_fb_info *fb_info;
1831         struct dmub_srv *dmub_srv;
1832         const struct dmcub_firmware_header_v1_0 *hdr;
1833         const char *fw_name_dmub;
1834         enum dmub_asic dmub_asic;
1835         enum dmub_status status;
1836         int r;
1837
1838         switch (adev->ip_versions[DCE_HWIP][0]) {
1839         case IP_VERSION(2, 1, 0):
1840                 dmub_asic = DMUB_ASIC_DCN21;
1841                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1842                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1843                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1844                 break;
1845         case IP_VERSION(3, 0, 0):
1846                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1847                         dmub_asic = DMUB_ASIC_DCN30;
1848                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1849                 } else {
1850                         dmub_asic = DMUB_ASIC_DCN30;
1851                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1852                 }
1853                 break;
1854         case IP_VERSION(3, 0, 1):
1855                 dmub_asic = DMUB_ASIC_DCN301;
1856                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1857                 break;
1858         case IP_VERSION(3, 0, 2):
1859                 dmub_asic = DMUB_ASIC_DCN302;
1860                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1861                 break;
1862         case IP_VERSION(3, 0, 3):
1863                 dmub_asic = DMUB_ASIC_DCN303;
1864                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1865                 break;
1866         case IP_VERSION(3, 1, 2):
1867         case IP_VERSION(3, 1, 3):
1868                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1869                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1870                 break;
1871
1872         default:
1873                 /* ASIC doesn't support DMUB. */
1874                 return 0;
1875         }
1876
1877         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1878         if (r) {
1879                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1880                 return 0;
1881         }
1882
1883         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1884         if (r) {
1885                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1886                 return 0;
1887         }
1888
1889         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1890         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1891
1892         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1893                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1894                         AMDGPU_UCODE_ID_DMCUB;
1895                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1896                         adev->dm.dmub_fw;
1897                 adev->firmware.fw_size +=
1898                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1899
1900                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1901                          adev->dm.dmcub_fw_version);
1902         }
1903
1904
1905         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1906         dmub_srv = adev->dm.dmub_srv;
1907
1908         if (!dmub_srv) {
1909                 DRM_ERROR("Failed to allocate DMUB service!\n");
1910                 return -ENOMEM;
1911         }
1912
1913         memset(&create_params, 0, sizeof(create_params));
1914         create_params.user_ctx = adev;
1915         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1916         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1917         create_params.asic = dmub_asic;
1918
1919         /* Create the DMUB service. */
1920         status = dmub_srv_create(dmub_srv, &create_params);
1921         if (status != DMUB_STATUS_OK) {
1922                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1923                 return -EINVAL;
1924         }
1925
1926         /* Calculate the size of all the regions for the DMUB service. */
1927         memset(&region_params, 0, sizeof(region_params));
1928
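        /* The PSP header/footer wrapped around the code region are stripped
         * below, so only the raw DMUB instruction data is measured and mapped. */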
1929         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1930                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1931         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1932         region_params.vbios_size = adev->bios_size;
1933         region_params.fw_bss_data = region_params.bss_data_size ?
1934                 adev->dm.dmub_fw->data +
1935                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1936                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1937         region_params.fw_inst_const =
1938                 adev->dm.dmub_fw->data +
1939                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1940                 PSP_HEADER_BYTES;
1941
1942         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1943                                            &region_info);
1944
1945         if (status != DMUB_STATUS_OK) {
1946                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1947                 return -EINVAL;
1948         }
1949
1950         /*
1951          * Allocate a framebuffer based on the total size of all the regions.
1952          * TODO: Move this into GART.
1953          */
1954         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1955                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1956                                     &adev->dm.dmub_bo_gpu_addr,
1957                                     &adev->dm.dmub_bo_cpu_addr);
1958         if (r)
1959                 return r;
1960
1961         /* Rebase the regions on the framebuffer address. */
1962         memset(&fb_params, 0, sizeof(fb_params));
1963         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1964         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1965         fb_params.region_info = &region_info;
1966
1967         adev->dm.dmub_fb_info =
1968                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1969         fb_info = adev->dm.dmub_fb_info;
1970
1971         if (!fb_info) {
1972                 DRM_ERROR(
1973                         "Failed to allocate framebuffer info for DMUB service!\n");
1974                 return -ENOMEM;
1975         }
1976
1977         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1978         if (status != DMUB_STATUS_OK) {
1979                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1980                 return -EINVAL;
1981         }
1982
1983         return 0;
1984 }
1985
1986 static int dm_sw_init(void *handle)
1987 {
1988         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1989         int r;
1990
1991         r = dm_dmub_sw_init(adev);
1992         if (r)
1993                 return r;
1994
1995         return load_dmcu_fw(adev);
1996 }
1997
1998 static int dm_sw_fini(void *handle)
1999 {
2000         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2001
2002         kfree(adev->dm.dmub_fb_info);
2003         adev->dm.dmub_fb_info = NULL;
2004
2005         if (adev->dm.dmub_srv) {
2006                 dmub_srv_destroy(adev->dm.dmub_srv);
2007                 adev->dm.dmub_srv = NULL;
2008         }
2009
2010         release_firmware(adev->dm.dmub_fw);
2011         adev->dm.dmub_fw = NULL;
2012
2013         release_firmware(adev->dm.fw_dmcu);
2014         adev->dm.fw_dmcu = NULL;
2015
2016         return 0;
2017 }
2018
2019 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2020 {
2021         struct amdgpu_dm_connector *aconnector;
2022         struct drm_connector *connector;
2023         struct drm_connector_list_iter iter;
2024         int ret = 0;
2025
2026         drm_connector_list_iter_begin(dev, &iter);
2027         drm_for_each_connector_iter(connector, &iter) {
2028                 aconnector = to_amdgpu_dm_connector(connector);
2029                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2030                     aconnector->mst_mgr.aux) {
2031                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2032                                          aconnector,
2033                                          aconnector->base.base.id);
2034
2035                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2036                         if (ret < 0) {
2037                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2038                                 aconnector->dc_link->type =
2039                                         dc_connection_single;
2040                                 break;
2041                         }
2042                 }
2043         }
2044         drm_connector_list_iter_end(&iter);
2045
2046         return ret;
2047 }
2048
2049 static int dm_late_init(void *handle)
2050 {
2051         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2052
2053         struct dmcu_iram_parameters params;
2054         unsigned int linear_lut[16];
2055         int i;
2056         struct dmcu *dmcu = NULL;
2057
2058         dmcu = adev->dm.dc->res_pool->dmcu;
2059
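        /* Build a 16-entry linear backlight LUT spanning the full 16-bit range. */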
2060         for (i = 0; i < 16; i++)
2061                 linear_lut[i] = 0xFFFF * i / 15;
2062
2063         params.set = 0;
2064         params.backlight_ramping_override = false;
2065         params.backlight_ramping_start = 0xCCCC;
2066         params.backlight_ramping_reduction = 0xCCCCCCCC;
2067         params.backlight_lut_array_size = 16;
2068         params.backlight_lut_array = linear_lut;
2069
2070         /* Min backlight level after ABM reduction; don't allow below 1%:
2071          * 0xFFFF x 0.01 = 0x28F
2072          */
2073         params.min_abm_backlight = 0x28F;
2074         /* In the case where ABM is implemented on dmcub,
2075          * the dmcu object will be NULL.
2076          * ABM 2.4 and up are implemented on dmcub.
2077          */
2078         if (dmcu) {
2079                 if (!dmcu_load_iram(dmcu, params))
2080                         return -EINVAL;
2081         } else if (adev->dm.dc->ctx->dmub_srv) {
2082                 struct dc_link *edp_links[MAX_NUM_EDP];
2083                 int edp_num;
2084
2085                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2086                 for (i = 0; i < edp_num; i++) {
2087                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2088                                 return -EINVAL;
2089                 }
2090         }
2091
2092         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2093 }
2094
2095 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2096 {
2097         struct amdgpu_dm_connector *aconnector;
2098         struct drm_connector *connector;
2099         struct drm_connector_list_iter iter;
2100         struct drm_dp_mst_topology_mgr *mgr;
2101         int ret;
2102         bool need_hotplug = false;
2103
2104         drm_connector_list_iter_begin(dev, &iter);
2105         drm_for_each_connector_iter(connector, &iter) {
2106                 aconnector = to_amdgpu_dm_connector(connector);
2107                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2108                     aconnector->mst_port)
2109                         continue;
2110
2111                 mgr = &aconnector->mst_mgr;
2112
2113                 if (suspend) {
2114                         drm_dp_mst_topology_mgr_suspend(mgr);
2115                 } else {
2116                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2117                         if (ret < 0) {
2118                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2119                                 need_hotplug = true;
2120                         }
2121                 }
2122         }
2123         drm_connector_list_iter_end(&iter);
2124
2125         if (need_hotplug)
2126                 drm_kms_helper_hotplug_event(dev);
2127 }
2128
2129 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2130 {
2131         struct smu_context *smu = &adev->smu;
2132         int ret = 0;
2133
2134         if (!is_support_sw_smu(adev))
2135                 return 0;
2136
2137          /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2138          * on the Windows driver dc implementation.
2139          * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2140          * should be passed to smu during boot up and resume from s3.
2141          * boot up: dc calculates dcn watermark clock settings within dc_create,
2142          * dcn20_resource_construct
2143          * then call pplib functions below to pass the settings to smu:
2144          * smu_set_watermarks_for_clock_ranges
2145          * smu_set_watermarks_table
2146          * navi10_set_watermarks_table
2147          * smu_write_watermarks_table
2148          *
2149          * For Renoir, clock settings of dcn watermark are also fixed values.
2150          * dc has implemented a different flow for the Windows driver:
2151          * dc_hardware_init / dc_set_power_state
2152          * dcn10_init_hw
2153          * notify_wm_ranges
2154          * set_wm_ranges
2155          * -- Linux
2156          * smu_set_watermarks_for_clock_ranges
2157          * renoir_set_watermarks_table
2158          * smu_write_watermarks_table
2159          *
2160          * For Linux,
2161          * dc_hardware_init --> amdgpu_dm_init
2162          * dc_set_power_state --> dm_resume
2163          *
2164          * therefore, this function applies to navi10/12/14 but not Renoir.
2165          *
2166          */
2167         switch (adev->ip_versions[DCE_HWIP][0]) {
2168         case IP_VERSION(2, 0, 2):
2169         case IP_VERSION(2, 0, 0):
2170                 break;
2171         default:
2172                 return 0;
2173         }
2174
2175         ret = smu_write_watermarks_table(smu);
2176         if (ret) {
2177                 DRM_ERROR("Failed to update WMTABLE!\n");
2178                 return ret;
2179         }
2180
2181         return 0;
2182 }
2183
2184 /**
2185  * dm_hw_init() - Initialize DC device
2186  * @handle: The base driver device containing the amdgpu_dm device.
2187  *
2188  * Initialize the &struct amdgpu_display_manager device. This involves calling
2189  * the initializers of each DM component, then populating the struct with them.
2190  *
2191  * Although the function implies hardware initialization, both hardware and
2192  * software are initialized here. Splitting them out to their relevant init
2193  * hooks is a future TODO item.
2194  *
2195  * Some notable things that are initialized here:
2196  *
2197  * - Display Core, both software and hardware
2198  * - DC modules that we need (freesync and color management)
2199  * - DRM software states
2200  * - Interrupt sources and handlers
2201  * - Vblank support
2202  * - Debug FS entries, if enabled
2203  */
2204 static int dm_hw_init(void *handle)
2205 {
2206         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2207         /* Create DAL display manager */
2208         amdgpu_dm_init(adev);
2209         amdgpu_dm_hpd_init(adev);
2210
2211         return 0;
2212 }
2213
2214 /**
2215  * dm_hw_fini() - Teardown DC device
2216  * @handle: The base driver device containing the amdgpu_dm device.
2217  *
2218  * Teardown components within &struct amdgpu_display_manager that require
2219  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2220  * were loaded. Also flush IRQ workqueues and disable them.
2221  */
2222 static int dm_hw_fini(void *handle)
2223 {
2224         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2225
2226         amdgpu_dm_hpd_fini(adev);
2227
2228         amdgpu_dm_irq_fini(adev);
2229         amdgpu_dm_fini(adev);
2230         return 0;
2231 }
2232
2233
2234 static int dm_enable_vblank(struct drm_crtc *crtc);
2235 static void dm_disable_vblank(struct drm_crtc *crtc);
2236
2237 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2238                                  struct dc_state *state, bool enable)
2239 {
2240         enum dc_irq_source irq_source;
2241         struct amdgpu_crtc *acrtc;
2242         int rc = -EBUSY;
2243         int i = 0;
2244
2245         for (i = 0; i < state->stream_count; i++) {
2246                 acrtc = get_crtc_by_otg_inst(
2247                                 adev, state->stream_status[i].primary_otg_inst);
2248
2249                 if (acrtc && state->stream_status[i].plane_count != 0) {
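                        /* Pageflip IRQ sources are indexed by the CRTC's OTG instance. */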
2250                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2251                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2252                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2253                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2254                         if (rc)
2255                                 DRM_WARN("Failed to %s pflip interrupts\n",
2256                                          enable ? "enable" : "disable");
2257
2258                         if (enable) {
2259                                 rc = dm_enable_vblank(&acrtc->base);
2260                                 if (rc)
2261                                         DRM_WARN("Failed to enable vblank interrupts\n");
2262                         } else {
2263                                 dm_disable_vblank(&acrtc->base);
2264                         }
2265
2266                 }
2267         }
2268
2269 }
2270
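/*
 * Commit a copy of the current DC state with all streams (and their planes)
 * removed, leaving the hardware driving zero streams, e.g. across a GPU reset.
 */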
2271 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2272 {
2273         struct dc_state *context = NULL;
2274         enum dc_status res = DC_ERROR_UNEXPECTED;
2275         int i;
2276         struct dc_stream_state *del_streams[MAX_PIPES];
2277         int del_streams_count = 0;
2278
2279         memset(del_streams, 0, sizeof(del_streams));
2280
2281         context = dc_create_state(dc);
2282         if (context == NULL)
2283                 goto context_alloc_fail;
2284
2285         dc_resource_state_copy_construct_current(dc, context);
2286
2287         /* First remove from context all streams */
2288         for (i = 0; i < context->stream_count; i++) {
2289                 struct dc_stream_state *stream = context->streams[i];
2290
2291                 del_streams[del_streams_count++] = stream;
2292         }
2293
2294         /* Remove all planes for removed streams and then remove the streams */
2295         for (i = 0; i < del_streams_count; i++) {
2296                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2297                         res = DC_FAIL_DETACH_SURFACES;
2298                         goto fail;
2299                 }
2300
2301                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2302                 if (res != DC_OK)
2303                         goto fail;
2304         }
2305
2306
2307         res = dc_validate_global_state(dc, context, false);
2308
2309         if (res != DC_OK) {
2310                 DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
2311                 goto fail;
2312         }
2313
2314         res = dc_commit_state(dc, context);
2315
2316 fail:
2317         dc_release_state(context);
2318
2319 context_alloc_fail:
2320         return res;
2321 }
2322
2323 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2324 {
2325         int i;
2326
2327         if (dm->hpd_rx_offload_wq) {
2328                 for (i = 0; i < dm->dc->caps.max_links; i++)
2329                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2330         }
2331 }
2332
2333 static int dm_suspend(void *handle)
2334 {
2335         struct amdgpu_device *adev = handle;
2336         struct amdgpu_display_manager *dm = &adev->dm;
2337         int ret = 0;
2338
2339         if (amdgpu_in_reset(adev)) {
2340                 mutex_lock(&dm->dc_lock);
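                /*
                 * Note: dc_lock is intentionally left held across the reset;
                 * the amdgpu_in_reset() path of dm_resume() releases it.
                 */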
2341
2342 #if defined(CONFIG_DRM_AMD_DC_DCN)
2343                 dc_allow_idle_optimizations(adev->dm.dc, false);
2344 #endif
2345
2346                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2347
2348                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2349
2350                 amdgpu_dm_commit_zero_streams(dm->dc);
2351
2352                 amdgpu_dm_irq_suspend(adev);
2353
2354                 hpd_rx_irq_work_suspend(dm);
2355
2356                 return ret;
2357         }
2358
2359         WARN_ON(adev->dm.cached_state);
2360         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2361
2362         s3_handle_mst(adev_to_drm(adev), true);
2363
2364         amdgpu_dm_irq_suspend(adev);
2365
2366         hpd_rx_irq_work_suspend(dm);
2367
2368         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2369
2370         return 0;
2371 }
2372
2373 static struct amdgpu_dm_connector *
2374 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2375                                              struct drm_crtc *crtc)
2376 {
2377         uint32_t i;
2378         struct drm_connector_state *new_con_state;
2379         struct drm_connector *connector;
2380         struct drm_crtc *crtc_from_state;
2381
2382         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2383                 crtc_from_state = new_con_state->crtc;
2384
2385                 if (crtc_from_state == crtc)
2386                         return to_amdgpu_dm_connector(connector);
2387         }
2388
2389         return NULL;
2390 }
2391
2392 static void emulated_link_detect(struct dc_link *link)
2393 {
2394         struct dc_sink_init_data sink_init_data = { 0 };
2395         struct display_sink_capability sink_caps = { 0 };
2396         enum dc_edid_status edid_status;
2397         struct dc_context *dc_ctx = link->ctx;
2398         struct dc_sink *sink = NULL;
2399         struct dc_sink *prev_sink = NULL;
2400
2401         link->type = dc_connection_none;
2402         prev_sink = link->local_sink;
2403
2404         if (prev_sink)
2405                 dc_sink_release(prev_sink);
2406
2407         switch (link->connector_signal) {
2408         case SIGNAL_TYPE_HDMI_TYPE_A: {
2409                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2410                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2411                 break;
2412         }
2413
2414         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2415                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2416                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2417                 break;
2418         }
2419
2420         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2421                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2422                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2423                 break;
2424         }
2425
2426         case SIGNAL_TYPE_LVDS: {
2427                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2428                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2429                 break;
2430         }
2431
2432         case SIGNAL_TYPE_EDP: {
2433                 sink_caps.transaction_type =
2434                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2435                 sink_caps.signal = SIGNAL_TYPE_EDP;
2436                 break;
2437         }
2438
2439         case SIGNAL_TYPE_DISPLAY_PORT: {
2440                 sink_caps.transaction_type =
2441                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
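                /* With no physical sink attached, DP is emulated as a virtual signal. */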
2442                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2443                 break;
2444         }
2445
2446         default:
2447                 DC_ERROR("Invalid connector type! signal:%d\n",
2448                         link->connector_signal);
2449                 return;
2450         }
2451
2452         sink_init_data.link = link;
2453         sink_init_data.sink_signal = sink_caps.signal;
2454
2455         sink = dc_sink_create(&sink_init_data);
2456         if (!sink) {
2457                 DC_ERROR("Failed to create sink!\n");
2458                 return;
2459         }
2460
2461         /* dc_sink_create returns a new reference */
2462         link->local_sink = sink;
2463
2464         edid_status = dm_helpers_read_local_edid(
2465                         link->ctx,
2466                         link,
2467                         sink);
2468
2469         if (edid_status != EDID_OK)
2470                 DC_ERROR("Failed to read EDID");
2471
2472 }
2473
2474 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2475                                      struct amdgpu_display_manager *dm)
2476 {
2477         struct {
2478                 struct dc_surface_update surface_updates[MAX_SURFACES];
2479                 struct dc_plane_info plane_infos[MAX_SURFACES];
2480                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2481                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2482                 struct dc_stream_update stream_update;
2483         } *bundle;
2484         int k, m;
2485
2486         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2487
2488         if (!bundle) {
2489                 dm_error("Failed to allocate update bundle\n");
2490                 goto cleanup;
2491         }
2492
2493         for (k = 0; k < dc_state->stream_count; k++) {
2494                 bundle->stream_update.stream = dc_state->streams[k];
2495
2496                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2497                         bundle->surface_updates[m].surface =
2498                                 dc_state->stream_status[k].plane_states[m];
2499                         bundle->surface_updates[m].surface->force_full_update =
2500                                 true;
2501                 }
2502                 dc_commit_updates_for_stream(
2503                         dm->dc, bundle->surface_updates,
2504                         dc_state->stream_status[k].plane_count,
2505                         dc_state->streams[k], &bundle->stream_update, dc_state);
2506         }
2507
2508 cleanup:
2509         kfree(bundle);
2510
2511         return;
2512 }
2513
2514 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2515 {
2516         struct dc_stream_state *stream_state;
2517         struct amdgpu_dm_connector *aconnector = link->priv;
2518         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2519         struct dc_stream_update stream_update;
2520         bool dpms_off = true;
2521
2522         memset(&stream_update, 0, sizeof(stream_update));
2523         stream_update.dpms_off = &dpms_off;
2524
2525         mutex_lock(&adev->dm.dc_lock);
2526         stream_state = dc_stream_find_from_link(link);
2527
2528         if (stream_state == NULL) {
2529                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2530                 mutex_unlock(&adev->dm.dc_lock);
2531                 return;
2532         }
2533
2534         stream_update.stream = stream_state;
2535         acrtc_state->force_dpms_off = true;
2536         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2537                                      stream_state, &stream_update,
2538                                      stream_state->ctx->dc->current_state);
2539         mutex_unlock(&adev->dm.dc_lock);
2540 }
2541
2542 static int dm_resume(void *handle)
2543 {
2544         struct amdgpu_device *adev = handle;
2545         struct drm_device *ddev = adev_to_drm(adev);
2546         struct amdgpu_display_manager *dm = &adev->dm;
2547         struct amdgpu_dm_connector *aconnector;
2548         struct drm_connector *connector;
2549         struct drm_connector_list_iter iter;
2550         struct drm_crtc *crtc;
2551         struct drm_crtc_state *new_crtc_state;
2552         struct dm_crtc_state *dm_new_crtc_state;
2553         struct drm_plane *plane;
2554         struct drm_plane_state *new_plane_state;
2555         struct dm_plane_state *dm_new_plane_state;
2556         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2557         enum dc_connection_type new_connection_type = dc_connection_none;
2558         struct dc_state *dc_state;
2559         int i, r, j;
2560
2561         if (amdgpu_in_reset(adev)) {
2562                 dc_state = dm->cached_dc_state;
2563
2564                 r = dm_dmub_hw_init(adev);
2565                 if (r)
2566                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2567
2568                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2569                 dc_resume(dm->dc);
2570
2571                 amdgpu_dm_irq_resume_early(adev);
2572
2573                 for (i = 0; i < dc_state->stream_count; i++) {
2574                         dc_state->streams[i]->mode_changed = true;
2575                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2576                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2577                                         = 0xffffffff;
2578                         }
2579                 }
2580 #if defined(CONFIG_DRM_AMD_DC_DCN)
2581                 /*
2582                  * Resource allocation happens for link encoders for newer ASIC in
2583                  * dc_validate_global_state, so we need to revalidate it.
2584                  *
2585                  * This shouldn't fail (it passed once before), so warn if it does.
2586                  */
2587                 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2588 #endif
2589
2590                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2591
2592                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2593
2594                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2595
2596                 dc_release_state(dm->cached_dc_state);
2597                 dm->cached_dc_state = NULL;
2598
2599                 amdgpu_dm_irq_resume_late(adev);
2600
2601                 mutex_unlock(&dm->dc_lock);
2602
2603                 return 0;
2604         }
2605         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2606         dc_release_state(dm_state->context);
2607         dm_state->context = dc_create_state(dm->dc);
        if (!dm_state->context)
                return -ENOMEM;
2608         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2609         dc_resource_state_construct(dm->dc, dm_state->context);
2610
2611         /* Before powering on DC we need to re-initialize DMUB. */
2612         r = dm_dmub_hw_init(adev);
2613         if (r)
2614                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2615
2616         /* power on hardware */
2617         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2618
2619         /* program HPD filter */
2620         dc_resume(dm->dc);
2621
2622         /*
2623          * early enable HPD Rx IRQ, should be done before set mode as short
2624          * pulse interrupts are used for MST
2625          */
2626         amdgpu_dm_irq_resume_early(adev);
2627
2628         /* On resume we need to rewrite the MSTM control bits to enable MST */
2629         s3_handle_mst(ddev, false);
2630
2631         /* Do detection */
2632         drm_connector_list_iter_begin(ddev, &iter);
2633         drm_for_each_connector_iter(connector, &iter) {
2634                 aconnector = to_amdgpu_dm_connector(connector);
2635
2636                 /*
2637                  * this is the case when traversing through already created
2638                  * MST connectors, should be skipped
2639                  */
2640                 if (aconnector->mst_port)
2641                         continue;
2642
2643                 mutex_lock(&aconnector->hpd_lock);
2644                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2645                         DRM_ERROR("KMS: Failed to detect connector\n");
2646
2647                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2648                         emulated_link_detect(aconnector->dc_link);
2649                 else
2650                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2651
2652                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2653                         aconnector->fake_enable = false;
2654
2655                 if (aconnector->dc_sink)
2656                         dc_sink_release(aconnector->dc_sink);
2657                 aconnector->dc_sink = NULL;
2658                 amdgpu_dm_update_connector_after_detect(aconnector);
2659                 mutex_unlock(&aconnector->hpd_lock);
2660         }
2661         drm_connector_list_iter_end(&iter);
2662
2663         /* Force mode set in atomic commit */
2664         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2665                 new_crtc_state->active_changed = true;
2666
2667         /*
2668          * atomic_check is expected to create the dc states. We need to release
2669          * them here, since they were duplicated as part of the suspend
2670          * procedure.
2671          */
2672         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2673                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2674                 if (dm_new_crtc_state->stream) {
2675                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2676                         dc_stream_release(dm_new_crtc_state->stream);
2677                         dm_new_crtc_state->stream = NULL;
2678                 }
2679         }
2680
2681         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2682                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2683                 if (dm_new_plane_state->dc_state) {
2684                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2685                         dc_plane_state_release(dm_new_plane_state->dc_state);
2686                         dm_new_plane_state->dc_state = NULL;
2687                 }
2688         }
2689
2690         drm_atomic_helper_resume(ddev, dm->cached_state);
2691
2692         dm->cached_state = NULL;
2693
2694         amdgpu_dm_irq_resume_late(adev);
2695
2696         amdgpu_dm_smu_write_watermarks_table(adev);
2697
2698         return 0;
2699 }
2700
2701 /**
2702  * DOC: DM Lifecycle
2703  *
2704  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2705  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2706  * the base driver's device list to be initialized and torn down accordingly.
2707  *
2708  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2709  */
2710
2711 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2712         .name = "dm",
2713         .early_init = dm_early_init,
2714         .late_init = dm_late_init,
2715         .sw_init = dm_sw_init,
2716         .sw_fini = dm_sw_fini,
2717         .early_fini = amdgpu_dm_early_fini,
2718         .hw_init = dm_hw_init,
2719         .hw_fini = dm_hw_fini,
2720         .suspend = dm_suspend,
2721         .resume = dm_resume,
2722         .is_idle = dm_is_idle,
2723         .wait_for_idle = dm_wait_for_idle,
2724         .check_soft_reset = dm_check_soft_reset,
2725         .soft_reset = dm_soft_reset,
2726         .set_clockgating_state = dm_set_clockgating_state,
2727         .set_powergating_state = dm_set_powergating_state,
2728 };
2729
2730 const struct amdgpu_ip_block_version dm_ip_block =
2731 {
2732         .type = AMD_IP_BLOCK_TYPE_DCE,
2733         .major = 1,
2734         .minor = 0,
2735         .rev = 0,
2736         .funcs = &amdgpu_dm_funcs,
2737 };
2738
2739
2740 /**
2741  * DOC: atomic
2742  *
2743  * *WIP*
2744  */
2745
2746 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2747         .fb_create = amdgpu_display_user_framebuffer_create,
2748         .get_format_info = amd_get_format_info,
2749         .output_poll_changed = drm_fb_helper_output_poll_changed,
2750         .atomic_check = amdgpu_dm_atomic_check,
2751         .atomic_commit = drm_atomic_helper_commit,
2752 };
2753
2754 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2755         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2756 };
2757
2758 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2759 {
2760         u32 max_cll, min_cll, max, min, q, r;
2761         struct amdgpu_dm_backlight_caps *caps;
2762         struct amdgpu_display_manager *dm;
2763         struct drm_connector *conn_base;
2764         struct amdgpu_device *adev;
2765         struct dc_link *link = NULL;
2766         static const u8 pre_computed_values[] = {
2767                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2768                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2769         int i;
2770
2771         if (!aconnector || !aconnector->dc_link)
2772                 return;
2773
2774         link = aconnector->dc_link;
2775         if (link->connector_signal != SIGNAL_TYPE_EDP)
2776                 return;
2777
2778         conn_base = &aconnector->base;
2779         adev = drm_to_adev(conn_base->dev);
2780         dm = &adev->dm;
2781         for (i = 0; i < dm->num_of_edps; i++) {
2782                 if (link == dm->backlight_link[i])
2783                         break;
2784         }
2785         if (i >= dm->num_of_edps)
2786                 return;
2787         caps = &dm->backlight_caps[i];
2788         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2789         caps->aux_support = false;
2790         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2791         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2792
2793         if (caps->ext_caps->bits.oled == 1 /*||
2794             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2795             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2796                 caps->aux_support = true;
2797
2798         if (amdgpu_backlight == 0)
2799                 caps->aux_support = false;
2800         else if (amdgpu_backlight == 1)
2801                 caps->aux_support = true;
2802
2803         /* From the specification (CTA-861-G), the maximum luminance is
2804          * calculated as:
2805          *      Luminance = 50*2**(CV/32)
2806          * where CV is a one-byte value.
2807          * Evaluating this expression directly would need floating-point
2808          * precision; to avoid that complexity, we take advantage of the fact
2809          * that CV is divided by a constant. From Euclid's division algorithm,
2810          * we know that CV can be written as CV = 32*q + r. Substituting CV in
2811          * the Luminance expression gives 50*(2**q)*(2**(r/32)), so we only
2812          * need to pre-compute the values of 50*(2**(r/32)) for r = 0..31.
2813          * The table was generated with the following Ruby line:
2814          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2815          * The results of that expression can be verified against
2816          * pre_computed_values above.
2817          */
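        /*
         * Worked example (illustrative values only): max_cll = 70 gives
         * q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
         *      max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits.
         * With min_cll = 200, the fixed-point minimum computed below is
         *      DIV_ROUND_CLOSEST(228 * 200 * 200, 255 * 255 * 100) = 1 nit.
         */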
2818         q = max_cll >> 5;
2819         r = max_cll % 32;
2820         max = (1 << q) * pre_computed_values[r];
2821
2822         /* min luminance: maxLum * (CV/255)^2 / 100 */
2823         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll,
2824                                 255 * 255 * 100);
2825
2826         caps->aux_max_input_signal = max;
2827         caps->aux_min_input_signal = min;
2828 }
2829
2830 void amdgpu_dm_update_connector_after_detect(
2831                 struct amdgpu_dm_connector *aconnector)
2832 {
2833         struct drm_connector *connector = &aconnector->base;
2834         struct drm_device *dev = connector->dev;
2835         struct dc_sink *sink;
2836
2837         /* MST handled by drm_mst framework */
2838         if (aconnector->mst_mgr.mst_state)
2839                 return;
2840
2841         sink = aconnector->dc_link->local_sink;
2842         if (sink)
2843                 dc_sink_retain(sink);
2844
2845         /*
2846          * An EDID-managed connector gets its first update only in the mode_valid
2847          * hook; the connector sink is then set to either the fake or the physical
2848          * sink depending on link status. Skip if already done during boot.
2849          */
2850         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2851                         && aconnector->dc_em_sink) {
2852
2853                 /*
2854                  * For headless S3 resume, use the emulated sink (dc_em_sink)
2855                  * to fake a stream, because connector->sink is NULL on resume.
2856                  */
2857                 mutex_lock(&dev->mode_config.mutex);
2858
2859                 if (sink) {
2860                         if (aconnector->dc_sink) {
2861                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2862                                 /*
2863                                  * The retain and release below bump up the sink's
2864                                  * refcount because the link no longer points to it
2865                                  * after disconnect; otherwise the next crtc-to-connector
2866                                  * reshuffle by the UMD would trigger an unwanted dc_sink release.
2867                                  */
2868                                 dc_sink_release(aconnector->dc_sink);
2869                         }
2870                         aconnector->dc_sink = sink;
2871                         dc_sink_retain(aconnector->dc_sink);
2872                         amdgpu_dm_update_freesync_caps(connector,
2873                                         aconnector->edid);
2874                 } else {
2875                         amdgpu_dm_update_freesync_caps(connector, NULL);
2876                         if (!aconnector->dc_sink) {
2877                                 aconnector->dc_sink = aconnector->dc_em_sink;
2878                                 dc_sink_retain(aconnector->dc_sink);
2879                         }
2880                 }
2881
2882                 mutex_unlock(&dev->mode_config.mutex);
2883
2884                 if (sink)
2885                         dc_sink_release(sink);
2886                 return;
2887         }
2888
2889         /*
2890          * TODO: temporary guard until a proper fix is found.
2891          * If this sink is an MST sink, we should not do anything.
2892          */
2893         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2894                 dc_sink_release(sink);
2895                 return;
2896         }
2897
2898         if (aconnector->dc_sink == sink) {
2899                 /*
2900                  * We got a DP short pulse (link loss, DP CTS, etc.);
2901                  * there is nothing to do.
2902                  */
2903                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2904                                 aconnector->connector_id);
2905                 if (sink)
2906                         dc_sink_release(sink);
2907                 return;
2908         }
2909
2910         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2911                 aconnector->connector_id, aconnector->dc_sink, sink);
2912
2913         mutex_lock(&dev->mode_config.mutex);
2914
2915         /*
2916          * 1. Update status of the drm connector
2917          * 2. Send an event and let userspace tell us what to do
2918          */
2919         if (sink) {
2920                 /*
2921                  * TODO: check if we still need the S3 mode update workaround.
2922                  * If yes, put it here.
2923                  */
2924                 if (aconnector->dc_sink) {
2925                         amdgpu_dm_update_freesync_caps(connector, NULL);
2926                         dc_sink_release(aconnector->dc_sink);
2927                 }
2928
2929                 aconnector->dc_sink = sink;
2930                 dc_sink_retain(aconnector->dc_sink);
2931                 if (sink->dc_edid.length == 0) {
2932                         aconnector->edid = NULL;
2933                         if (aconnector->dc_link->aux_mode) {
2934                                 drm_dp_cec_unset_edid(
2935                                         &aconnector->dm_dp_aux.aux);
2936                         }
2937                 } else {
2938                         aconnector->edid =
2939                                 (struct edid *)sink->dc_edid.raw_edid;
2940
2941                         drm_connector_update_edid_property(connector,
2942                                                            aconnector->edid);
2943                         if (aconnector->dc_link->aux_mode)
2944                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2945                                                     aconnector->edid);
2946                 }
2947
2948                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2949                 update_connector_ext_caps(aconnector);
2950         } else {
2951                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2952                 amdgpu_dm_update_freesync_caps(connector, NULL);
2953                 drm_connector_update_edid_property(connector, NULL);
2954                 aconnector->num_modes = 0;
2955                 dc_sink_release(aconnector->dc_sink);
2956                 aconnector->dc_sink = NULL;
2957                 aconnector->edid = NULL;
2958 #ifdef CONFIG_DRM_AMD_DC_HDCP
2959                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2960                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2961                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2962 #endif
2963         }
2964
2965         mutex_unlock(&dev->mode_config.mutex);
2966
2967         update_subconnector_property(aconnector);
2968
2969         if (sink)
2970                 dc_sink_release(sink);
2971 }
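/*
 * Note on the refcounting pattern above (descriptive summary): a
 * dc_sink_retain() is taken for every pointer that begins referencing a
 * sink -- the local 'sink' snapshot and aconnector->dc_sink -- and every
 * exit path pairs it with a dc_sink_release(), so the sink is freed only
 * once the last holder drops its reference.
 */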
2972
2973 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2974 {
2975         struct drm_connector *connector = &aconnector->base;
2976         struct drm_device *dev = connector->dev;
2977         enum dc_connection_type new_connection_type = dc_connection_none;
2978         struct amdgpu_device *adev = drm_to_adev(dev);
2979         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2980         struct dm_crtc_state *dm_crtc_state = NULL;
2981
2982         if (adev->dm.disable_hpd_irq)
2983                 return;
2984
2985         if (dm_con_state->base.state && dm_con_state->base.crtc)
2986                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2987                                         dm_con_state->base.state,
2988                                         dm_con_state->base.crtc));
2989         /*
2990          * On failure, or for MST, there is no need to update the connector
2991          * status or notify the OS, since MST handles this in its own context.
2992          */
2993         mutex_lock(&aconnector->hpd_lock);
2994
2995 #ifdef CONFIG_DRM_AMD_DC_HDCP
2996         if (adev->dm.hdcp_workqueue) {
2997                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2998                 dm_con_state->update_hdcp = true;
2999         }
3000 #endif
3001         if (aconnector->fake_enable)
3002                 aconnector->fake_enable = false;
3003
3004         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3005                 DRM_ERROR("KMS: Failed to detect connector\n");
3006
3007         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3008                 emulated_link_detect(aconnector->dc_link);
3009
3010                 drm_modeset_lock_all(dev);
3011                 dm_restore_drm_connector_state(dev, connector);
3012                 drm_modeset_unlock_all(dev);
3013
3014                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3015                         drm_kms_helper_hotplug_event(dev);
3016
3017         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3018                 if (new_connection_type == dc_connection_none &&
3019                     aconnector->dc_link->type == dc_connection_none &&
3020                     dm_crtc_state)
3021                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3022
3023                 amdgpu_dm_update_connector_after_detect(aconnector);
3024
3025                 drm_modeset_lock_all(dev);
3026                 dm_restore_drm_connector_state(dev, connector);
3027                 drm_modeset_unlock_all(dev);
3028
3029                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3030                         drm_kms_helper_hotplug_event(dev);
3031         }
3032         mutex_unlock(&aconnector->hpd_lock);
3033
3034 }
3035
3036 static void handle_hpd_irq(void *param)
3037 {
3038         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3039
3040         handle_hpd_irq_helper(aconnector);
3041
3042 }
3043
3044 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3045 {
3046         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3047         uint8_t dret;
3048         bool new_irq_handled = false;
3049         int dpcd_addr;
3050         int dpcd_bytes_to_read;
3051
3052         const int max_process_count = 30;
3053         int process_count = 0;
3054
3055         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3056
3057         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3058                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3059                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3060                 dpcd_addr = DP_SINK_COUNT;
3061         } else {
3062                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3063                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3064                 dpcd_addr = DP_SINK_COUNT_ESI;
3065         }
3066
3067         dret = drm_dp_dpcd_read(
3068                 &aconnector->dm_dp_aux.aux,
3069                 dpcd_addr,
3070                 esi,
3071                 dpcd_bytes_to_read);
3072
3073         while (dret == dpcd_bytes_to_read &&
3074                 process_count < max_process_count) {
3075                 uint8_t retry;
3076                 dret = 0;
3077
3078                 process_count++;
3079
3080                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3081                 /* handle HPD short pulse irq */
3082                 if (aconnector->mst_mgr.mst_state)
3083                         drm_dp_mst_hpd_irq(
3084                                 &aconnector->mst_mgr,
3085                                 esi,
3086                                 &new_irq_handled);
3087
3088                 if (new_irq_handled) {
3089                         /* ACK at DPCD to notify the downstream sink */
3090                         const int ack_dpcd_bytes_to_write =
3091                                 dpcd_bytes_to_read - 1;
3092
3093                         for (retry = 0; retry < 3; retry++) {
3094                                 uint8_t wret;
3095
3096                                 wret = drm_dp_dpcd_write(
3097                                         &aconnector->dm_dp_aux.aux,
3098                                         dpcd_addr + 1,
3099                                         &esi[1],
3100                                         ack_dpcd_bytes_to_write);
3101                                 if (wret == ack_dpcd_bytes_to_write)
3102                                         break;
3103                         }
3104
3105                         /* check if there is new irq to be handled */
3106                         dret = drm_dp_dpcd_read(
3107                                 &aconnector->dm_dp_aux.aux,
3108                                 dpcd_addr,
3109                                 esi,
3110                                 dpcd_bytes_to_read);
3111
3112                         new_irq_handled = false;
3113                 } else {
3114                         break;
3115                 }
3116         }
3117
3118         if (process_count == max_process_count)
3119                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3120 }
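/*
 * Illustrative DPCD map for the ESI path above (DP 1.2+ sinks): the read
 * covers 0x2002-0x2005, i.e. four bytes starting at DP_SINK_COUNT_ESI,
 * and the ACK writes esi[1..3] back to 0x2003-0x2005 so the sink can
 * clear the serviced IRQ bits before the loop polls for a new vector.
 */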
3121
3122 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3123                                                         union hpd_irq_data hpd_irq_data)
3124 {
3125         struct hpd_rx_irq_offload_work *offload_work =
3126                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3127
3128         if (!offload_work) {
3129                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3130                 return;
3131         }
3132
3133         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3134         offload_work->data = hpd_irq_data;
3135         offload_work->offload_wq = offload_wq;
3136
3137         queue_work(offload_wq->wq, &offload_work->work);
3138         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3139 }
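/*
 * A minimal sketch of the consuming side of this producer/consumer pair.
 * The real handler, dm_handle_hpd_rx_offload_work(), is defined earlier
 * in this file; the body below is illustrative only and not compiled.
 */
#if 0
static void example_hpd_rx_offload_handler(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work =
		container_of(work, struct hpd_rx_irq_offload_work, work);

	/* ... act on offload_work->data for offload_work->offload_wq ... */

	kfree(offload_work);	/* matches the kzalloc() in the producer */
}
#endif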
3140
3141 static void handle_hpd_rx_irq(void *param)
3142 {
3143         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3144         struct drm_connector *connector = &aconnector->base;
3145         struct drm_device *dev = connector->dev;
3146         struct dc_link *dc_link = aconnector->dc_link;
3147         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3148         bool result = false;
3149         enum dc_connection_type new_connection_type = dc_connection_none;
3150         struct amdgpu_device *adev = drm_to_adev(dev);
3151         union hpd_irq_data hpd_irq_data;
3152         bool link_loss = false;
3153         bool has_left_work = false;
3154         int idx = aconnector->base.index;
3155         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3156
3157         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3158
3159         if (adev->dm.disable_hpd_irq)
3160                 return;
3161
3162         /*
3163          * TODO: This mutex temporarily protects the HPD interrupt from a
3164          * GPIO conflict; once the i2c helper is implemented, this mutex
3165          * should be retired.
3166          */
3167         mutex_lock(&aconnector->hpd_lock);
3168
3169         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3170                                                 &link_loss, true, &has_left_work);
3171
3172         if (!has_left_work)
3173                 goto out;
3174
3175         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3176                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3177                 goto out;
3178         }
3179
3180         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3181                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3182                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3183                         dm_handle_mst_sideband_msg(aconnector);
3184                         goto out;
3185                 }
3186
3187                 if (link_loss) {
3188                         bool skip = false;
3189
3190                         spin_lock(&offload_wq->offload_lock);
3191                         skip = offload_wq->is_handling_link_loss;
3192
3193                         if (!skip)
3194                                 offload_wq->is_handling_link_loss = true;
3195
3196                         spin_unlock(&offload_wq->offload_lock);
3197
3198                         if (!skip)
3199                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3200
3201                         goto out;
3202                 }
3203         }
3204
3205 out:
3206         if (result && !is_mst_root_connector) {
3207                 /* Downstream Port status changed. */
3208                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3209                         DRM_ERROR("KMS: Failed to detect connector\n");
3210
3211                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3212                         emulated_link_detect(dc_link);
3213
3214                         if (aconnector->fake_enable)
3215                                 aconnector->fake_enable = false;
3216
3217                         amdgpu_dm_update_connector_after_detect(aconnector);
3218
3219
3220                         drm_modeset_lock_all(dev);
3221                         dm_restore_drm_connector_state(dev, connector);
3222                         drm_modeset_unlock_all(dev);
3223
3224                         drm_kms_helper_hotplug_event(dev);
3225                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3226
3227                         if (aconnector->fake_enable)
3228                                 aconnector->fake_enable = false;
3229
3230                         amdgpu_dm_update_connector_after_detect(aconnector);
3231
3232
3233                         drm_modeset_lock_all(dev);
3234                         dm_restore_drm_connector_state(dev, connector);
3235                         drm_modeset_unlock_all(dev);
3236
3237                         drm_kms_helper_hotplug_event(dev);
3238                 }
3239         }
3240 #ifdef CONFIG_DRM_AMD_DC_HDCP
3241         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3242                 if (adev->dm.hdcp_workqueue)
3243                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3244         }
3245 #endif
3246
3247         if (dc_link->type != dc_connection_mst_branch)
3248                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3249
3250         mutex_unlock(&aconnector->hpd_lock);
3251 }
3252
3253 static void register_hpd_handlers(struct amdgpu_device *adev)
3254 {
3255         struct drm_device *dev = adev_to_drm(adev);
3256         struct drm_connector *connector;
3257         struct amdgpu_dm_connector *aconnector;
3258         const struct dc_link *dc_link;
3259         struct dc_interrupt_params int_params = {0};
3260
3261         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3262         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3263
3264         list_for_each_entry(connector,
3265                         &dev->mode_config.connector_list, head) {
3266
3267                 aconnector = to_amdgpu_dm_connector(connector);
3268                 dc_link = aconnector->dc_link;
3269
3270                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3271                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3272                         int_params.irq_source = dc_link->irq_source_hpd;
3273
3274                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3275                                         handle_hpd_irq,
3276                                         (void *) aconnector);
3277                 }
3278
3279                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3280
3281                         /* Also register for DP short pulse (hpd_rx). */
3282                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3283                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3284
3285                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3286                                         handle_hpd_rx_irq,
3287                                         (void *) aconnector);
3288
3289                         if (adev->dm.hpd_rx_offload_wq)
3290                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3291                                         aconnector;
3292                 }
3293         }
3294 }
3295
3296 #if defined(CONFIG_DRM_AMD_DC_SI)
3297 /* Register IRQ sources and initialize IRQ callbacks */
3298 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3299 {
3300         struct dc *dc = adev->dm.dc;
3301         struct common_irq_params *c_irq_params;
3302         struct dc_interrupt_params int_params = {0};
3303         int r;
3304         int i;
3305         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3306
3307         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3308         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3309
3310         /*
3311          * Actions of amdgpu_irq_add_id():
3312          * 1. Register a set() function with base driver.
3313          *    Base driver will call set() function to enable/disable an
3314          *    interrupt in DC hardware.
3315          * 2. Register amdgpu_dm_irq_handler().
3316          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3317          *    coming from DC hardware.
3318          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3319          *    for acknowledging and handling.
3320          */
3321         /* Use VBLANK interrupt */
3322         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3323                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3324                 if (r) {
3325                         DRM_ERROR("Failed to add crtc irq id!\n");
3326                         return r;
3327                 }
3328
3329                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3330                 int_params.irq_source =
3331                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3332
3333                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3334
3335                 c_irq_params->adev = adev;
3336                 c_irq_params->irq_src = int_params.irq_source;
3337
3338                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3339                                 dm_crtc_high_irq, c_irq_params);
3340         }
3341
3342         /* Use GRPH_PFLIP interrupt */
3343         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3344                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3345                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3346                 if (r) {
3347                         DRM_ERROR("Failed to add page flip irq id!\n");
3348                         return r;
3349                 }
3350
3351                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3352                 int_params.irq_source =
3353                         dc_interrupt_to_irq_source(dc, i, 0);
3354
3355                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3356
3357                 c_irq_params->adev = adev;
3358                 c_irq_params->irq_src = int_params.irq_source;
3359
3360                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3361                                 dm_pflip_high_irq, c_irq_params);
3362
3363         }
3364
3365         /* HPD */
3366         r = amdgpu_irq_add_id(adev, client_id,
3367                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3368         if (r) {
3369                 DRM_ERROR("Failed to add hpd irq id!\n");
3370                 return r;
3371         }
3372
3373         register_hpd_handlers(adev);
3374
3375         return 0;
3376 }
3377 #endif
3378
3379 /* Register IRQ sources and initialize IRQ callbacks */
3380 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3381 {
3382         struct dc *dc = adev->dm.dc;
3383         struct common_irq_params *c_irq_params;
3384         struct dc_interrupt_params int_params = {0};
3385         int r;
3386         int i;
3387         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3388
3389         if (adev->family >= AMDGPU_FAMILY_AI)
3390                 client_id = SOC15_IH_CLIENTID_DCE;
3391
3392         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3393         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3394
3395         /*
3396          * Actions of amdgpu_irq_add_id():
3397          * 1. Register a set() function with base driver.
3398          *    Base driver will call set() function to enable/disable an
3399          *    interrupt in DC hardware.
3400          * 2. Register amdgpu_dm_irq_handler().
3401          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3402          *    coming from DC hardware.
3403          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3404          *    for acknowledging and handling.
3405          */
3406         /* Use VBLANK interrupt */
3407         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3408                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3409                 if (r) {
3410                         DRM_ERROR("Failed to add crtc irq id!\n");
3411                         return r;
3412                 }
3413
3414                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3415                 int_params.irq_source =
3416                         dc_interrupt_to_irq_source(dc, i, 0);
3417
3418                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3419
3420                 c_irq_params->adev = adev;
3421                 c_irq_params->irq_src = int_params.irq_source;
3422
3423                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3424                                 dm_crtc_high_irq, c_irq_params);
3425         }
3426
3427         /* Use VUPDATE interrupt */
3428         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3429                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3430                 if (r) {
3431                         DRM_ERROR("Failed to add vupdate irq id!\n");
3432                         return r;
3433                 }
3434
3435                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3436                 int_params.irq_source =
3437                         dc_interrupt_to_irq_source(dc, i, 0);
3438
3439                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3440
3441                 c_irq_params->adev = adev;
3442                 c_irq_params->irq_src = int_params.irq_source;
3443
3444                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3445                                 dm_vupdate_high_irq, c_irq_params);
3446         }
3447
3448         /* Use GRPH_PFLIP interrupt */
3449         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3450                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3451                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3452                 if (r) {
3453                         DRM_ERROR("Failed to add page flip irq id!\n");
3454                         return r;
3455                 }
3456
3457                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3458                 int_params.irq_source =
3459                         dc_interrupt_to_irq_source(dc, i, 0);
3460
3461                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3462
3463                 c_irq_params->adev = adev;
3464                 c_irq_params->irq_src = int_params.irq_source;
3465
3466                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3467                                 dm_pflip_high_irq, c_irq_params);
3468
3469         }
3470
3471         /* HPD */
3472         r = amdgpu_irq_add_id(adev, client_id,
3473                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3474         if (r) {
3475                 DRM_ERROR("Failed to add hpd irq id!\n");
3476                 return r;
3477         }
3478
3479         register_hpd_handlers(adev);
3480
3481         return 0;
3482 }
3483
3484 #if defined(CONFIG_DRM_AMD_DC_DCN)
3485 /* Register IRQ sources and initialize IRQ callbacks */
3486 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3487 {
3488         struct dc *dc = adev->dm.dc;
3489         struct common_irq_params *c_irq_params;
3490         struct dc_interrupt_params int_params = {0};
3491         int r;
3492         int i;
3493 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3494         static const unsigned int vrtl_int_srcid[] = {
3495                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3496                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3497                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3498                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3499                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3500                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3501         };
3502 #endif
3503
3504         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3505         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3506
3507         /*
3508          * Actions of amdgpu_irq_add_id():
3509          * 1. Register a set() function with base driver.
3510          *    Base driver will call set() function to enable/disable an
3511          *    interrupt in DC hardware.
3512          * 2. Register amdgpu_dm_irq_handler().
3513          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3514          *    coming from DC hardware.
3515          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3516          *    for acknowledging and handling.
3517          */
3518
3519         /* Use VSTARTUP interrupt */
3520         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3521                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3522                         i++) {
3523                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3524
3525                 if (r) {
3526                         DRM_ERROR("Failed to add crtc irq id!\n");
3527                         return r;
3528                 }
3529
3530                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3531                 int_params.irq_source =
3532                         dc_interrupt_to_irq_source(dc, i, 0);
3533
3534                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3535
3536                 c_irq_params->adev = adev;
3537                 c_irq_params->irq_src = int_params.irq_source;
3538
3539                 amdgpu_dm_irq_register_interrupt(
3540                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3541         }
3542
3543         /* Use otg vertical line interrupt */
3544 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3545         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3546                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3547                                 vrtl_int_srcid[i], &adev->vline0_irq);
3548
3549                 if (r) {
3550                         DRM_ERROR("Failed to add vline0 irq id!\n");
3551                         return r;
3552                 }
3553
3554                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3555                 int_params.irq_source =
3556                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3557
3558                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3559                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3560                         break;
3561                 }
3562
3563                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3564                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3565
3566                 c_irq_params->adev = adev;
3567                 c_irq_params->irq_src = int_params.irq_source;
3568
3569                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3570                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3571         }
3572 #endif
3573
3574         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3575          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3576          * to trigger at end of each vblank, regardless of state of the lock,
3577          * matching DCE behaviour.
3578          */
3579         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3580              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3581              i++) {
3582                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3583
3584                 if (r) {
3585                         DRM_ERROR("Failed to add vupdate irq id!\n");
3586                         return r;
3587                 }
3588
3589                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3590                 int_params.irq_source =
3591                         dc_interrupt_to_irq_source(dc, i, 0);
3592
3593                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3594
3595                 c_irq_params->adev = adev;
3596                 c_irq_params->irq_src = int_params.irq_source;
3597
3598                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3599                                 dm_vupdate_high_irq, c_irq_params);
3600         }
3601
3602         /* Use GRPH_PFLIP interrupt */
3603         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3604                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3605                         i++) {
3606                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3607                 if (r) {
3608                         DRM_ERROR("Failed to add page flip irq id!\n");
3609                         return r;
3610                 }
3611
3612                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3613                 int_params.irq_source =
3614                         dc_interrupt_to_irq_source(dc, i, 0);
3615
3616                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3617
3618                 c_irq_params->adev = adev;
3619                 c_irq_params->irq_src = int_params.irq_source;
3620
3621                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3622                                 dm_pflip_high_irq, c_irq_params);
3623
3624         }
3625
3626         /* HPD */
3627         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3628                         &adev->hpd_irq);
3629         if (r) {
3630                 DRM_ERROR("Failed to add hpd irq id!\n");
3631                 return r;
3632         }
3633
3634         register_hpd_handlers(adev);
3635
3636         return 0;
3637 }
3638 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3639 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3640 {
3641         struct dc *dc = adev->dm.dc;
3642         struct common_irq_params *c_irq_params;
3643         struct dc_interrupt_params int_params = {0};
3644         int r, i;
3645
3646         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3647         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3648
3649         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3650                         &adev->dmub_outbox_irq);
3651         if (r) {
3652                 DRM_ERROR("Failed to add outbox irq id!\n");
3653                 return r;
3654         }
3655
3656         if (dc->ctx->dmub_srv) {
3657                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3658                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3659                 int_params.irq_source =
3660                         dc_interrupt_to_irq_source(dc, i, 0);
3661
3662                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3663
3664                 c_irq_params->adev = adev;
3665                 c_irq_params->irq_src = int_params.irq_source;
3666
3667                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3668                                 dm_dmub_outbox1_low_irq, c_irq_params);
3669         }
3670
3671         return 0;
3672 }
3673 #endif
3674
3675 /*
3676  * Acquires the lock for the atomic state object and returns
3677  * the new atomic state.
3678  *
3679  * This should only be called during atomic check.
3680  */
3681 static int dm_atomic_get_state(struct drm_atomic_state *state,
3682                                struct dm_atomic_state **dm_state)
3683 {
3684         struct drm_device *dev = state->dev;
3685         struct amdgpu_device *adev = drm_to_adev(dev);
3686         struct amdgpu_display_manager *dm = &adev->dm;
3687         struct drm_private_state *priv_state;
3688
3689         if (*dm_state)
3690                 return 0;
3691
3692         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3693         if (IS_ERR(priv_state))
3694                 return PTR_ERR(priv_state);
3695
3696         *dm_state = to_dm_atomic_state(priv_state);
3697
3698         return 0;
3699 }
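/*
 * A minimal usage sketch (illustrative only, not compiled): callers in
 * atomic check start with a NULL pointer and let the helper fill it in
 * exactly once, however many times they ask.
 */
#if 0
	struct dm_atomic_state *dm_state = NULL;
	int ret;

	ret = dm_atomic_get_state(state, &dm_state);
	if (ret)
		return ret;
	/* dm_state->context can now be inspected or modified */
#endif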
3700
3701 static struct dm_atomic_state *
3702 dm_atomic_get_new_state(struct drm_atomic_state *state)
3703 {
3704         struct drm_device *dev = state->dev;
3705         struct amdgpu_device *adev = drm_to_adev(dev);
3706         struct amdgpu_display_manager *dm = &adev->dm;
3707         struct drm_private_obj *obj;
3708         struct drm_private_state *new_obj_state;
3709         int i;
3710
3711         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3712                 if (obj->funcs == dm->atomic_obj.funcs)
3713                         return to_dm_atomic_state(new_obj_state);
3714         }
3715
3716         return NULL;
3717 }
3718
3719 static struct drm_private_state *
3720 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3721 {
3722         struct dm_atomic_state *old_state, *new_state;
3723
3724         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3725         if (!new_state)
3726                 return NULL;
3727
3728         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3729
3730         old_state = to_dm_atomic_state(obj->state);
3731
3732         if (old_state && old_state->context)
3733                 new_state->context = dc_copy_state(old_state->context);
3734
3735         if (!new_state->context) {
3736                 kfree(new_state);
3737                 return NULL;
3738         }
3739
3740         return &new_state->base;
3741 }
3742
3743 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3744                                     struct drm_private_state *state)
3745 {
3746         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3747
3748         if (dm_state && dm_state->context)
3749                 dc_release_state(dm_state->context);
3750
3751         kfree(dm_state);
3752 }
3753
3754 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3755         .atomic_duplicate_state = dm_atomic_duplicate_state,
3756         .atomic_destroy_state = dm_atomic_destroy_state,
3757 };
3758
3759 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3760 {
3761         struct dm_atomic_state *state;
3762         int r;
3763
3764         adev->mode_info.mode_config_initialized = true;
3765
3766         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3767         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3768
3769         adev_to_drm(adev)->mode_config.max_width = 16384;
3770         adev_to_drm(adev)->mode_config.max_height = 16384;
3771
3772         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3773         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3774         /* indicates support for immediate flip */
3775         adev_to_drm(adev)->mode_config.async_page_flip = true;
3776
3777         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3778
3779         state = kzalloc(sizeof(*state), GFP_KERNEL);
3780         if (!state)
3781                 return -ENOMEM;
3782
3783         state->context = dc_create_state(adev->dm.dc);
3784         if (!state->context) {
3785                 kfree(state);
3786                 return -ENOMEM;
3787         }
3788
3789         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3790
3791         drm_atomic_private_obj_init(adev_to_drm(adev),
3792                                     &adev->dm.atomic_obj,
3793                                     &state->base,
3794                                     &dm_atomic_state_funcs);
3795
3796         r = amdgpu_display_modeset_create_props(adev);
3797         if (r) {
3798                 dc_release_state(state->context);
3799                 kfree(state);
3800                 return r;
3801         }
3802
3803         r = amdgpu_dm_audio_init(adev);
3804         if (r) {
3805                 dc_release_state(state->context);
3806                 kfree(state);
3807                 return r;
3808         }
3809
3810         return 0;
3811 }
3812
3813 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3814 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3815 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3816
3817 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3818         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3819
3820 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3821                                             int bl_idx)
3822 {
3823 #if defined(CONFIG_ACPI)
3824         struct amdgpu_dm_backlight_caps caps;
3825
3826         memset(&caps, 0, sizeof(caps));
3827
3828         if (dm->backlight_caps[bl_idx].caps_valid)
3829                 return;
3830
3831         amdgpu_acpi_get_backlight_caps(&caps);
3832         if (caps.caps_valid) {
3833                 dm->backlight_caps[bl_idx].caps_valid = true;
3834                 if (caps.aux_support)
3835                         return;
3836                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3837                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3838         } else {
3839                 dm->backlight_caps[bl_idx].min_input_signal =
3840                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3841                 dm->backlight_caps[bl_idx].max_input_signal =
3842                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3843         }
3844 #else
3845         if (dm->backlight_caps[bl_idx].aux_support)
3846                 return;
3847
3848         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3849         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3850 #endif
3851 }
3852
3853 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3854                                 unsigned *min, unsigned *max)
3855 {
3856         if (!caps)
3857                 return 0;
3858
3859         if (caps->aux_support) {
3860                 // Firmware limits are in nits, DC API wants millinits.
3861                 *max = 1000 * caps->aux_max_input_signal;
3862                 *min = 1000 * caps->aux_min_input_signal;
3863         } else {
3864                 // Firmware limits are 8-bit, PWM control is 16-bit.
3865                 *max = 0x101 * caps->max_input_signal;
3866                 *min = 0x101 * caps->min_input_signal;
3867         }
3868         return 1;
3869 }
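/*
 * Worked example (using the PWM defaults defined above): with
 * min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT (12) and
 * max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT (255), the 16-bit
 * PWM range becomes min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 0xffff.
 */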
3870
3871 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3872                                         uint32_t brightness)
3873 {
3874         unsigned min, max;
3875
3876         if (!get_brightness_range(caps, &min, &max))
3877                 return brightness;
3878
3879         // Rescale 0..255 to min..max
3880         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3881                                        AMDGPU_MAX_BL_LEVEL);
3882 }
3883
3884 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3885                                       uint32_t brightness)
3886 {
3887         unsigned min, max;
3888
3889         if (!get_brightness_range(caps, &min, &max))
3890                 return brightness;
3891
3892         if (brightness < min)
3893                 return 0;
3894         // Rescale min..max to 0..255
3895         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3896                                  max - min);
3897 }
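/*
 * Worked round trip for the two helpers above (using the PWM range from
 * the previous example, min = 3084 and max = 65535): user brightness 128
 * maps to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34433, and
 * converting 34433 back gives DIV_ROUND_CLOSEST(255 * 31349, 62451) = 128.
 */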
3898
3899 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3900                                          int bl_idx,
3901                                          u32 user_brightness)
3902 {
3903         struct amdgpu_dm_backlight_caps caps;
3904         struct dc_link *link;
3905         u32 brightness;
3906         bool rc;
3907
3908         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3909         caps = dm->backlight_caps[bl_idx];
3910
3911         dm->brightness[bl_idx] = user_brightness;
3912         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3913         link = (struct dc_link *)dm->backlight_link[bl_idx];
3914
3915         /* Change brightness based on AUX property */
3916         if (caps.aux_support) {
3917                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3918                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3919                 if (!rc)
3920                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3921         } else {
3922                 rc = dc_link_set_backlight_level(link, brightness, 0);
3923                 if (!rc)
3924                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3925         }
3926
3927         return rc ? 0 : 1;
3928 }
3929
3930 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3931 {
3932         struct amdgpu_display_manager *dm = bl_get_data(bd);
3933         int i;
3934
3935         for (i = 0; i < dm->num_of_edps; i++) {
3936                 if (bd == dm->backlight_dev[i])
3937                         break;
3938         }
3939         if (i >= dm->num_of_edps)
3940                 i = 0;
3941         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3942
3943         return 0;
3944 }
3945
3946 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3947                                          int bl_idx)
3948 {
3949         struct amdgpu_dm_backlight_caps caps;
3950         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3951
3952         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3953         caps = dm->backlight_caps[bl_idx];
3954
3955         if (caps.aux_support) {
3956                 u32 avg, peak;
3957                 bool rc;
3958
3959                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3960                 if (!rc)
3961                         return dm->brightness[bl_idx];
3962                 return convert_brightness_to_user(&caps, avg);
3963         } else {
3964                 int ret = dc_link_get_backlight_level(link);
3965
3966                 if (ret == DC_ERROR_UNEXPECTED)
3967                         return dm->brightness[bl_idx];
3968                 return convert_brightness_to_user(&caps, ret);
3969         }
3970 }
3971
3972 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3973 {
3974         struct amdgpu_display_manager *dm = bl_get_data(bd);
3975         int i;
3976
3977         for (i = 0; i < dm->num_of_edps; i++) {
3978                 if (bd == dm->backlight_dev[i])
3979                         break;
3980         }
3981         if (i >= dm->num_of_edps)
3982                 i = 0;
3983         return amdgpu_dm_backlight_get_level(dm, i);
3984 }
3985
3986 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3987         .options = BL_CORE_SUSPENDRESUME,
3988         .get_brightness = amdgpu_dm_backlight_get_brightness,
3989         .update_status  = amdgpu_dm_backlight_update_status,
3990 };
3991
3992 static void
3993 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3994 {
3995         char bl_name[16];
3996         struct backlight_properties props = { 0 };
3997
3998         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3999         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4000
4001         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4002         props.brightness = AMDGPU_MAX_BL_LEVEL;
4003         props.type = BACKLIGHT_RAW;
4004
4005         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4006                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4007
4008         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4009                                                                        adev_to_drm(dm->adev)->dev,
4010                                                                        dm,
4011                                                                        &amdgpu_dm_backlight_ops,
4012                                                                        &props);
4013
4014         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4015                 DRM_ERROR("DM: Backlight registration failed!\n");
4016         else
4017                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4018 }
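/*
 * Once registered, the device appears to userspace as
 * /sys/class/backlight/amdgpu_bl<N>; writes to its 'brightness' attribute
 * are routed by the backlight core into
 * amdgpu_dm_backlight_update_status() above.
 */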
4019 #endif
4020
4021 static int initialize_plane(struct amdgpu_display_manager *dm,
4022                             struct amdgpu_mode_info *mode_info, int plane_id,
4023                             enum drm_plane_type plane_type,
4024                             const struct dc_plane_cap *plane_cap)
4025 {
4026         struct drm_plane *plane;
4027         unsigned long possible_crtcs;
4028         int ret = 0;
4029
4030         plane = kzalloc(sizeof(*plane), GFP_KERNEL);
4031         if (!plane) {
4032                 DRM_ERROR("KMS: Failed to allocate plane\n");
4033                 return -ENOMEM;
4034         }
4035         plane->type = plane_type;
4036
4037         /*
4038          * HACK: IGT tests expect that the primary plane for a CRTC
4039          * can only have one possible CRTC. Only expose support for
4040          * any CRTC on planes that will not be used as a primary plane
4041          * for a CRTC, such as overlay or underlay planes.
4042          */
4043         possible_crtcs = 1 << plane_id;
4044         if (plane_id >= dm->dc->caps.max_streams)
4045                 possible_crtcs = 0xff;
4046
4047         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4048
4049         if (ret) {
4050                 DRM_ERROR("KMS: Failed to initialize plane\n");
4051                 kfree(plane);
4052                 return ret;
4053         }
4054
4055         if (mode_info)
4056                 mode_info->planes[plane_id] = plane;
4057
4058         return ret;
4059 }
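/*
 * Illustrative example of the possible_crtcs encoding above: with
 * max_streams = 4, primary plane 2 gets possible_crtcs = 1 << 2 = 0x4
 * (CRTC 2 only), while an overlay plane with plane_id >= 4 gets 0xff and
 * may be placed on any CRTC.
 */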
4060
4061
4062 static void register_backlight_device(struct amdgpu_display_manager *dm,
4063                                       struct dc_link *link)
4064 {
4065 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4066         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4067
4068         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4069             link->type != dc_connection_none) {
4070                 /*
4071                  * Even if registration fails, we should continue with
4072                  * DM initialization, because not having backlight control
4073                  * is better than a black screen.
4074                  */
4075                 if (!dm->backlight_dev[dm->num_of_edps])
4076                         amdgpu_dm_register_backlight_device(dm);
4077
4078                 if (dm->backlight_dev[dm->num_of_edps]) {
4079                         dm->backlight_link[dm->num_of_edps] = link;
4080                         dm->num_of_edps++;
4081                 }
4082         }
4083 #endif
4084 }
4085
4086
4087 /*
4088  * In this architecture, the association
4089  * connector -> encoder -> crtc
4090  * is not really required. The crtc and connector will hold the
4091  * display_index as an abstraction for use with the DAL component.
4092  *
4093  * Returns 0 on success
4094  */
4095 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4096 {
4097         struct amdgpu_display_manager *dm = &adev->dm;
4098         int32_t i;
4099         struct amdgpu_dm_connector *aconnector = NULL;
4100         struct amdgpu_encoder *aencoder = NULL;
4101         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4102         uint32_t link_cnt;
4103         int32_t primary_planes;
4104         enum dc_connection_type new_connection_type = dc_connection_none;
4105         const struct dc_plane_cap *plane;
4106         bool psr_feature_enabled = false;
4107
4108         dm->display_indexes_num = dm->dc->caps.max_streams;
4109         /* Update the actual number of CRTCs in use */
4110         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4111
4112         link_cnt = dm->dc->caps.max_links;
4113         if (amdgpu_dm_mode_config_init(dm->adev)) {
4114                 DRM_ERROR("DM: Failed to initialize mode config\n");
4115                 return -EINVAL;
4116         }
4117
4118         /* There is one primary plane per CRTC */
4119         primary_planes = dm->dc->caps.max_streams;
4120         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4121
4122         /*
         * Initialize primary planes, implicit planes for legacy IOCTLs.
4124          * Order is reversed to match iteration order in atomic check.
4125          */
4126         for (i = (primary_planes - 1); i >= 0; i--) {
4127                 plane = &dm->dc->caps.planes[i];
4128
4129                 if (initialize_plane(dm, mode_info, i,
4130                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4131                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4132                         goto fail;
4133                 }
4134         }
4135
4136         /*
4137          * Initialize overlay planes, index starting after primary planes.
4138          * These planes have a higher DRM index than the primary planes since
4139          * they should be considered as having a higher z-order.
4140          * Order is reversed to match iteration order in atomic check.
4141          *
4142          * Only support DCN for now, and only expose one so we don't encourage
4143          * userspace to use up all the pipes.
4144          */
4145         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4146                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4147
4148                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4149                         continue;
4150
4151                 if (!plane->blends_with_above || !plane->blends_with_below)
4152                         continue;
4153
4154                 if (!plane->pixel_format_support.argb8888)
4155                         continue;
4156
4157                 if (initialize_plane(dm, NULL, primary_planes + i,
4158                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4159                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4160                         goto fail;
4161                 }
4162
4163                 /* Only create one overlay plane. */
4164                 break;
4165         }
4166
        for (i = 0; i < dm->dc->caps.max_streams; i++) {
                if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
                        DRM_ERROR("KMS: Failed to initialize crtc\n");
                        goto fail;
                }
        }
4172
4173 #if defined(CONFIG_DRM_AMD_DC_DCN)
4174         /* Use Outbox interrupt */
4175         switch (adev->ip_versions[DCE_HWIP][0]) {
4176         case IP_VERSION(3, 0, 0):
4177         case IP_VERSION(3, 1, 2):
4178         case IP_VERSION(3, 1, 3):
4179         case IP_VERSION(2, 1, 0):
4180                 if (register_outbox_irq_handlers(dm->adev)) {
4181                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4182                         goto fail;
4183                 }
4184                 break;
4185         default:
4186                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4187                               adev->ip_versions[DCE_HWIP][0]);
4188         }
4189
4190         /* Determine whether to enable PSR support by default. */
4191         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4192                 switch (adev->ip_versions[DCE_HWIP][0]) {
4193                 case IP_VERSION(3, 1, 2):
4194                 case IP_VERSION(3, 1, 3):
4195                         psr_feature_enabled = true;
4196                         break;
4197                 default:
4198                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4199                         break;
4200                 }
4201         }
4202 #endif
4203
        /* Loop over all connectors on the board */
4205         for (i = 0; i < link_cnt; i++) {
4206                 struct dc_link *link = NULL;
4207
4208                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
                        DRM_ERROR(
                                "KMS: Cannot support more than %d display indexes\n",
                                AMDGPU_DM_MAX_DISPLAY_INDEX);
4212                         continue;
4213                 }
4214
4215                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4216                 if (!aconnector)
4217                         goto fail;
4218
4219                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4220                 if (!aencoder)
4221                         goto fail;
4222
4223                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4224                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4225                         goto fail;
4226                 }
4227
4228                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4229                         DRM_ERROR("KMS: Failed to initialize connector\n");
4230                         goto fail;
4231                 }
4232
4233                 link = dc_get_link_at_index(dm->dc, i);
4234
4235                 if (!dc_link_detect_sink(link, &new_connection_type))
4236                         DRM_ERROR("KMS: Failed to detect connector\n");
4237
4238                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4239                         emulated_link_detect(link);
4240                         amdgpu_dm_update_connector_after_detect(aconnector);
4241
4242                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4243                         amdgpu_dm_update_connector_after_detect(aconnector);
4244                         register_backlight_device(dm, link);
4245
4246                         if (psr_feature_enabled)
4247                                 amdgpu_dm_set_psr_caps(link);
4248                 }
4251         }
4252
4253         /* Software is initialized. Now we can register interrupt handlers. */
4254         switch (adev->asic_type) {
4255 #if defined(CONFIG_DRM_AMD_DC_SI)
4256         case CHIP_TAHITI:
4257         case CHIP_PITCAIRN:
4258         case CHIP_VERDE:
4259         case CHIP_OLAND:
4260                 if (dce60_register_irq_handlers(dm->adev)) {
4261                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4262                         goto fail;
4263                 }
4264                 break;
4265 #endif
4266         case CHIP_BONAIRE:
4267         case CHIP_HAWAII:
4268         case CHIP_KAVERI:
4269         case CHIP_KABINI:
4270         case CHIP_MULLINS:
4271         case CHIP_TONGA:
4272         case CHIP_FIJI:
4273         case CHIP_CARRIZO:
4274         case CHIP_STONEY:
4275         case CHIP_POLARIS11:
4276         case CHIP_POLARIS10:
4277         case CHIP_POLARIS12:
4278         case CHIP_VEGAM:
4279         case CHIP_VEGA10:
4280         case CHIP_VEGA12:
4281         case CHIP_VEGA20:
4282                 if (dce110_register_irq_handlers(dm->adev)) {
4283                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4284                         goto fail;
4285                 }
4286                 break;
4287         default:
4288 #if defined(CONFIG_DRM_AMD_DC_DCN)
4289                 switch (adev->ip_versions[DCE_HWIP][0]) {
4290                 case IP_VERSION(1, 0, 0):
4291                 case IP_VERSION(1, 0, 1):
4292                 case IP_VERSION(2, 0, 2):
4293                 case IP_VERSION(2, 0, 3):
4294                 case IP_VERSION(2, 0, 0):
4295                 case IP_VERSION(2, 1, 0):
4296                 case IP_VERSION(3, 0, 0):
4297                 case IP_VERSION(3, 0, 2):
4298                 case IP_VERSION(3, 0, 3):
4299                 case IP_VERSION(3, 0, 1):
4300                 case IP_VERSION(3, 1, 2):
4301                 case IP_VERSION(3, 1, 3):
4302                         if (dcn10_register_irq_handlers(dm->adev)) {
4303                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4304                                 goto fail;
4305                         }
4306                         break;
4307                 default:
4308                         DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4309                                         adev->ip_versions[DCE_HWIP][0]);
4310                         goto fail;
4311                 }
4312 #endif
4313                 break;
4314         }
4315
4316         return 0;
4317 fail:
4318         kfree(aencoder);
4319         kfree(aconnector);
4320
4321         return -EINVAL;
4322 }
4323
4324 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4325 {
4326         drm_atomic_private_obj_fini(&dm->atomic_obj);
4328 }
4329
4330 /******************************************************************************
4331  * amdgpu_display_funcs functions
4332  *****************************************************************************/
4333
/**
4335  * dm_bandwidth_update - program display watermarks
4336  *
4337  * @adev: amdgpu_device pointer
4338  *
4339  * Calculate and program the display watermarks and line buffer allocation.
4340  */
4341 static void dm_bandwidth_update(struct amdgpu_device *adev)
4342 {
4343         /* TODO: implement later */
4344 }
4345
static const struct amdgpu_display_funcs dm_display_funcs = {
        .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
        .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
        .backlight_set_level = NULL, /* never called for DC */
        .backlight_get_level = NULL, /* never called for DC */
        .hpd_sense = NULL, /* called unconditionally */
        .hpd_set_polarity = NULL, /* called unconditionally */
        .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
        .page_flip_get_scanoutpos =
                dm_crtc_get_scanoutpos, /* called unconditionally */
        .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
        .add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
4359
4360 #if defined(CONFIG_DEBUG_KERNEL_DC)
4361
4362 static ssize_t s3_debug_store(struct device *device,
4363                               struct device_attribute *attr,
4364                               const char *buf,
4365                               size_t count)
4366 {
4367         int ret;
4368         int s3_state;
4369         struct drm_device *drm_dev = dev_get_drvdata(device);
4370         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4371
4372         ret = kstrtoint(buf, 0, &s3_state);
4373
        if (ret == 0) {
                if (s3_state) {
                        dm_resume(adev);
                        drm_kms_helper_hotplug_event(adev_to_drm(adev));
                } else {
                        dm_suspend(adev);
                }
        }
4381
        return ret == 0 ? count : ret;
4383 }
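
/*
 * Usage sketch (assuming the usual sysfs layout; the card index may
 * differ per system):
 *
 *   echo 0 > /sys/class/drm/card0/device/s3_debug   # calls dm_suspend()
 *   echo 1 > /sys/class/drm/card0/device/s3_debug   # dm_resume() + hotplug event
 */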
4384
4385 DEVICE_ATTR_WO(s3_debug);
4386
4387 #endif
4388
4389 static int dm_early_init(void *handle)
4390 {
4391         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4392
4393         switch (adev->asic_type) {
4394 #if defined(CONFIG_DRM_AMD_DC_SI)
4395         case CHIP_TAHITI:
4396         case CHIP_PITCAIRN:
4397         case CHIP_VERDE:
4398                 adev->mode_info.num_crtc = 6;
4399                 adev->mode_info.num_hpd = 6;
4400                 adev->mode_info.num_dig = 6;
4401                 break;
4402         case CHIP_OLAND:
4403                 adev->mode_info.num_crtc = 2;
4404                 adev->mode_info.num_hpd = 2;
4405                 adev->mode_info.num_dig = 2;
4406                 break;
4407 #endif
4408         case CHIP_BONAIRE:
4409         case CHIP_HAWAII:
4410                 adev->mode_info.num_crtc = 6;
4411                 adev->mode_info.num_hpd = 6;
4412                 adev->mode_info.num_dig = 6;
4413                 break;
4414         case CHIP_KAVERI:
4415                 adev->mode_info.num_crtc = 4;
4416                 adev->mode_info.num_hpd = 6;
4417                 adev->mode_info.num_dig = 7;
4418                 break;
4419         case CHIP_KABINI:
4420         case CHIP_MULLINS:
4421                 adev->mode_info.num_crtc = 2;
4422                 adev->mode_info.num_hpd = 6;
4423                 adev->mode_info.num_dig = 6;
4424                 break;
4425         case CHIP_FIJI:
4426         case CHIP_TONGA:
4427                 adev->mode_info.num_crtc = 6;
4428                 adev->mode_info.num_hpd = 6;
4429                 adev->mode_info.num_dig = 7;
4430                 break;
4431         case CHIP_CARRIZO:
4432                 adev->mode_info.num_crtc = 3;
4433                 adev->mode_info.num_hpd = 6;
4434                 adev->mode_info.num_dig = 9;
4435                 break;
4436         case CHIP_STONEY:
4437                 adev->mode_info.num_crtc = 2;
4438                 adev->mode_info.num_hpd = 6;
4439                 adev->mode_info.num_dig = 9;
4440                 break;
4441         case CHIP_POLARIS11:
4442         case CHIP_POLARIS12:
4443                 adev->mode_info.num_crtc = 5;
4444                 adev->mode_info.num_hpd = 5;
4445                 adev->mode_info.num_dig = 5;
4446                 break;
4447         case CHIP_POLARIS10:
4448         case CHIP_VEGAM:
4449                 adev->mode_info.num_crtc = 6;
4450                 adev->mode_info.num_hpd = 6;
4451                 adev->mode_info.num_dig = 6;
4452                 break;
4453         case CHIP_VEGA10:
4454         case CHIP_VEGA12:
4455         case CHIP_VEGA20:
4456                 adev->mode_info.num_crtc = 6;
4457                 adev->mode_info.num_hpd = 6;
4458                 adev->mode_info.num_dig = 6;
4459                 break;
4460         default:
4461 #if defined(CONFIG_DRM_AMD_DC_DCN)
4462                 switch (adev->ip_versions[DCE_HWIP][0]) {
4463                 case IP_VERSION(2, 0, 2):
4464                 case IP_VERSION(3, 0, 0):
4465                         adev->mode_info.num_crtc = 6;
4466                         adev->mode_info.num_hpd = 6;
4467                         adev->mode_info.num_dig = 6;
4468                         break;
4469                 case IP_VERSION(2, 0, 0):
4470                 case IP_VERSION(3, 0, 2):
4471                         adev->mode_info.num_crtc = 5;
4472                         adev->mode_info.num_hpd = 5;
4473                         adev->mode_info.num_dig = 5;
4474                         break;
4475                 case IP_VERSION(2, 0, 3):
4476                 case IP_VERSION(3, 0, 3):
4477                         adev->mode_info.num_crtc = 2;
4478                         adev->mode_info.num_hpd = 2;
4479                         adev->mode_info.num_dig = 2;
4480                         break;
4481                 case IP_VERSION(1, 0, 0):
4482                 case IP_VERSION(1, 0, 1):
4483                 case IP_VERSION(3, 0, 1):
4484                 case IP_VERSION(2, 1, 0):
4485                 case IP_VERSION(3, 1, 2):
4486                 case IP_VERSION(3, 1, 3):
4487                         adev->mode_info.num_crtc = 4;
4488                         adev->mode_info.num_hpd = 4;
4489                         adev->mode_info.num_dig = 4;
4490                         break;
4491                 default:
4492                         DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4493                                         adev->ip_versions[DCE_HWIP][0]);
4494                         return -EINVAL;
4495                 }
4496 #endif
4497                 break;
4498         }
4499
4500         amdgpu_dm_set_irq_funcs(adev);
4501
        if (!adev->mode_info.funcs)
                adev->mode_info.funcs = &dm_display_funcs;
4504
4505         /*
4506          * Note: Do NOT change adev->audio_endpt_rreg and
4507          * adev->audio_endpt_wreg because they are initialised in
4508          * amdgpu_device_init()
4509          */
4510 #if defined(CONFIG_DEBUG_KERNEL_DC)
4511         device_create_file(
4512                 adev_to_drm(adev)->dev,
4513                 &dev_attr_s3_debug);
4514 #endif
4515
4516         return 0;
4517 }
4518
4519 static bool modeset_required(struct drm_crtc_state *crtc_state,
4520                              struct dc_stream_state *new_stream,
4521                              struct dc_stream_state *old_stream)
4522 {
4523         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4524 }
4525
4526 static bool modereset_required(struct drm_crtc_state *crtc_state)
4527 {
4528         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4529 }
4530
4531 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4532 {
4533         drm_encoder_cleanup(encoder);
4534         kfree(encoder);
4535 }
4536
4537 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4538         .destroy = amdgpu_dm_encoder_destroy,
4539 };
4540
4541
4542 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4543                                          struct drm_framebuffer *fb,
4544                                          int *min_downscale, int *max_upscale)
4545 {
4546         struct amdgpu_device *adev = drm_to_adev(dev);
4547         struct dc *dc = adev->dm.dc;
4548         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4549         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4550
4551         switch (fb->format->format) {
4552         case DRM_FORMAT_P010:
4553         case DRM_FORMAT_NV12:
4554         case DRM_FORMAT_NV21:
4555                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4556                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4557                 break;
4558
4559         case DRM_FORMAT_XRGB16161616F:
4560         case DRM_FORMAT_ARGB16161616F:
4561         case DRM_FORMAT_XBGR16161616F:
4562         case DRM_FORMAT_ABGR16161616F:
4563                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4564                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4565                 break;
4566
4567         default:
4568                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4569                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4570                 break;
4571         }
4572
4573         /*
         * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
         * use a scaling factor of 1.0 == 1000 units.
4576          */
4577         if (*max_upscale == 1)
4578                 *max_upscale = 1000;
4579
4580         if (*min_downscale == 1)
4581                 *min_downscale = 1000;
4582 }
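
/*
 * Example of the unit convention above (illustrative only): factors are
 * expressed in thousandths, so min_downscale == 250 permits shrinking to
 * 1/4 of the source size and max_upscale == 16000 permits enlarging up
 * to 16x; a raw cap of 1 ("no scaling") is normalized to 1000, i.e.
 * exactly 1.0.
 */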
4583
4584
4585 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4586                                 const struct drm_plane_state *state,
4587                                 struct dc_scaling_info *scaling_info)
4588 {
4589         int scale_w, scale_h, min_downscale, max_upscale;
4590
4591         memset(scaling_info, 0, sizeof(*scaling_info));
4592
        /* Source is in 16.16 fixed point, but we ignore the fractional part for now... */
4594         scaling_info->src_rect.x = state->src_x >> 16;
4595         scaling_info->src_rect.y = state->src_y >> 16;
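        /*
         * E.g. (illustrative): src_x == 0x00028000 encodes 2.5 in 16.16
         * fixed point; the shift above truncates it to 2.
         */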
4596
4597         /*
         * For reasons we don't (yet) fully understand, a non-zero
         * src_y coordinate into an NV12 buffer can cause a
         * system hang on DCN1x.
         * To avoid hangs (and maybe to be overly cautious),
         * let's reject both non-zero src_x and src_y.
4603          *
4604          * We currently know of only one use-case to reproduce a
4605          * scenario with non-zero src_x and src_y for NV12, which
4606          * is to gesture the YouTube Android app into full screen
4607          * on ChromeOS.
4608          */
4609         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4610             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4611             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4612             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4613                 return -EINVAL;
4614
4615         scaling_info->src_rect.width = state->src_w >> 16;
4616         if (scaling_info->src_rect.width == 0)
4617                 return -EINVAL;
4618
4619         scaling_info->src_rect.height = state->src_h >> 16;
4620         if (scaling_info->src_rect.height == 0)
4621                 return -EINVAL;
4622
4623         scaling_info->dst_rect.x = state->crtc_x;
4624         scaling_info->dst_rect.y = state->crtc_y;
4625
4626         if (state->crtc_w == 0)
4627                 return -EINVAL;
4628
4629         scaling_info->dst_rect.width = state->crtc_w;
4630
4631         if (state->crtc_h == 0)
4632                 return -EINVAL;
4633
4634         scaling_info->dst_rect.height = state->crtc_h;
4635
4636         /* DRM doesn't specify clipping on destination output. */
4637         scaling_info->clip_rect = scaling_info->dst_rect;
4638
4639         /* Validate scaling per-format with DC plane caps */
4640         if (state->plane && state->plane->dev && state->fb) {
4641                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4642                                              &min_downscale, &max_upscale);
4643         } else {
4644                 min_downscale = 250;
4645                 max_upscale = 16000;
4646         }
4647
4648         scale_w = scaling_info->dst_rect.width * 1000 /
4649                   scaling_info->src_rect.width;
4650
4651         if (scale_w < min_downscale || scale_w > max_upscale)
4652                 return -EINVAL;
4653
4654         scale_h = scaling_info->dst_rect.height * 1000 /
4655                   scaling_info->src_rect.height;
4656
4657         if (scale_h < min_downscale || scale_h > max_upscale)
4658                 return -EINVAL;
4659
4660         /*
4661          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4662          * assume reasonable defaults based on the format.
4663          */
4664
4665         return 0;
4666 }
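
/*
 * Worked example for the checks above (illustrative only): a
 * 3840-pixel-wide source scanned out into a 1920-pixel-wide destination
 * gives scale_w = 1920 * 1000 / 3840 = 500, a 2x downscale, which passes
 * whenever the format's min_downscale is <= 500.
 */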
4667
4668 static void
4669 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4670                                  uint64_t tiling_flags)
4671 {
4672         /* Fill GFX8 params */
4673         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4674                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4675
4676                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4677                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4678                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4679                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4680                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4681
4682                 /* XXX fix me for VI */
4683                 tiling_info->gfx8.num_banks = num_banks;
4684                 tiling_info->gfx8.array_mode =
4685                                 DC_ARRAY_2D_TILED_THIN1;
4686                 tiling_info->gfx8.tile_split = tile_split;
4687                 tiling_info->gfx8.bank_width = bankw;
4688                 tiling_info->gfx8.bank_height = bankh;
4689                 tiling_info->gfx8.tile_aspect = mtaspect;
4690                 tiling_info->gfx8.tile_mode =
4691                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4692         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4693                         == DC_ARRAY_1D_TILED_THIN1) {
4694                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4695         }
4696
4697         tiling_info->gfx8.pipe_config =
4698                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4699 }
4700
4701 static void
4702 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4703                                   union dc_tiling_info *tiling_info)
4704 {
4705         tiling_info->gfx9.num_pipes =
4706                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4707         tiling_info->gfx9.num_banks =
4708                 adev->gfx.config.gb_addr_config_fields.num_banks;
4709         tiling_info->gfx9.pipe_interleave =
4710                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4711         tiling_info->gfx9.num_shader_engines =
4712                 adev->gfx.config.gb_addr_config_fields.num_se;
4713         tiling_info->gfx9.max_compressed_frags =
4714                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4715         tiling_info->gfx9.num_rb_per_se =
4716                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4717         tiling_info->gfx9.shaderEnable = 1;
4718         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4719                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4720 }
4721
4722 static int
4723 validate_dcc(struct amdgpu_device *adev,
4724              const enum surface_pixel_format format,
4725              const enum dc_rotation_angle rotation,
4726              const union dc_tiling_info *tiling_info,
4727              const struct dc_plane_dcc_param *dcc,
4728              const struct dc_plane_address *address,
4729              const struct plane_size *plane_size)
4730 {
4731         struct dc *dc = adev->dm.dc;
4732         struct dc_dcc_surface_param input;
4733         struct dc_surface_dcc_cap output;
4734
4735         memset(&input, 0, sizeof(input));
4736         memset(&output, 0, sizeof(output));
4737
4738         if (!dcc->enable)
4739                 return 0;
4740
4741         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4742             !dc->cap_funcs.get_dcc_compression_cap)
4743                 return -EINVAL;
4744
4745         input.format = format;
4746         input.surface_size.width = plane_size->surface_size.width;
4747         input.surface_size.height = plane_size->surface_size.height;
4748         input.swizzle_mode = tiling_info->gfx9.swizzle;
4749
4750         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4751                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4752         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4753                 input.scan = SCAN_DIRECTION_VERTICAL;
4754
4755         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4756                 return -EINVAL;
4757
4758         if (!output.capable)
4759                 return -EINVAL;
4760
4761         if (dcc->independent_64b_blks == 0 &&
4762             output.grph.rgb.independent_64b_blks != 0)
4763                 return -EINVAL;
4764
4765         return 0;
4766 }
4767
4768 static bool
4769 modifier_has_dcc(uint64_t modifier)
4770 {
4771         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4772 }
4773
4774 static unsigned
4775 modifier_gfx9_swizzle_mode(uint64_t modifier)
4776 {
4777         if (modifier == DRM_FORMAT_MOD_LINEAR)
4778                 return 0;
4779
4780         return AMD_FMT_MOD_GET(TILE, modifier);
4781 }
4782
4783 static const struct drm_format_info *
4784 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4785 {
4786         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4787 }
4788
4789 static void
4790 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4791                                     union dc_tiling_info *tiling_info,
4792                                     uint64_t modifier)
4793 {
4794         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4795         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4796         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4797         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4798
4799         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4800
4801         if (!IS_AMD_FMT_MOD(modifier))
4802                 return;
4803
4804         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4805         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4806
4807         if (adev->family >= AMDGPU_FAMILY_NV) {
4808                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4809         } else {
4810                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4811
4812                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4813         }
4814 }
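
/*
 * Example for the decoding above (hypothetical AMD-format modifier): with
 * PIPE_XOR_BITS == 6, pipes_log2 = min(4, 6) = 4, so gfx9.num_pipes
 * becomes 1 << 4 == 16 and gfx9.num_shader_engines becomes
 * 1 << (6 - 4) == 4.
 */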
4815
4816 enum dm_micro_swizzle {
4817         MICRO_SWIZZLE_Z = 0,
4818         MICRO_SWIZZLE_S = 1,
4819         MICRO_SWIZZLE_D = 2,
4820         MICRO_SWIZZLE_R = 3
4821 };
4822
4823 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4824                                           uint32_t format,
4825                                           uint64_t modifier)
4826 {
4827         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4828         const struct drm_format_info *info = drm_format_info(format);
4829         int i;
4830
4831         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4832
4833         if (!info)
4834                 return false;
4835
4836         /*
4837          * We always have to allow these modifiers:
4838          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4839          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4840          */
4841         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4842             modifier == DRM_FORMAT_MOD_INVALID) {
4843                 return true;
4844         }
4845
4846         /* Check that the modifier is on the list of the plane's supported modifiers. */
4847         for (i = 0; i < plane->modifier_count; i++) {
4848                 if (modifier == plane->modifiers[i])
4849                         break;
4850         }
4851         if (i == plane->modifier_count)
4852                 return false;
4853
4854         /*
4855          * For D swizzle the canonical modifier depends on the bpp, so check
4856          * it here.
4857          */
4858         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4859             adev->family >= AMDGPU_FAMILY_NV) {
4860                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4861                         return false;
4862         }
4863
4864         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4865             info->cpp[0] < 8)
4866                 return false;
4867
4868         if (modifier_has_dcc(modifier)) {
                /* Per radeonsi comments, 16/64 bpp are more complicated. */
4870                 if (info->cpp[0] != 4)
4871                         return false;
                /*
                 * We support multi-planar formats, but not when combined
                 * with additional DCC metadata planes.
                 */
4874                 if (info->num_planes > 1)
4875                         return false;
4876         }
4877
4878         return true;
4879 }
4880
4881 static void
4882 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4883 {
4884         if (!*mods)
4885                 return;
4886
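        /* Out of space: grow geometrically so appends stay amortized O(1). */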
4887         if (*cap - *size < 1) {
                uint64_t new_cap = *cap * 2;
                uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t),
                                                   GFP_KERNEL);
4890
4891                 if (!new_mods) {
4892                         kfree(*mods);
4893                         *mods = NULL;
4894                         return;
4895                 }
4896
4897                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4898                 kfree(*mods);
4899                 *mods = new_mods;
4900                 *cap = new_cap;
4901         }
4902
4903         (*mods)[*size] = mod;
4904         *size += 1;
4905 }
4906
4907 static void
4908 add_gfx9_modifiers(const struct amdgpu_device *adev,
4909                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4910 {
4911         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4912         int pipe_xor_bits = min(8, pipes +
4913                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4914         int bank_xor_bits = min(8 - pipe_xor_bits,
4915                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4916         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4917                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4918
4920         if (adev->family == AMDGPU_FAMILY_RV) {
4921                 /* Raven2 and later */
4922                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4923
4924                 /*
4925                  * No _D DCC swizzles yet because we only allow 32bpp, which
4926                  * doesn't support _D on DCN
4927                  */
4928
4929                 if (has_constant_encode) {
4930                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4931                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4932                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4933                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4934                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4935                                     AMD_FMT_MOD_SET(DCC, 1) |
4936                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4937                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4938                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4939                 }
4940
4941                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4942                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4943                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4944                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4945                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4946                             AMD_FMT_MOD_SET(DCC, 1) |
4947                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4948                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4949                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4950
4951                 if (has_constant_encode) {
4952                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4953                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4954                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4955                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4956                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4957                                     AMD_FMT_MOD_SET(DCC, 1) |
4958                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4959                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4960                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4962                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4963                                     AMD_FMT_MOD_SET(RB, rb) |
4964                                     AMD_FMT_MOD_SET(PIPE, pipes));
4965                 }
4966
4967                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4968                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4969                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4970                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4971                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4972                             AMD_FMT_MOD_SET(DCC, 1) |
4973                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4974                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4975                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4976                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4977                             AMD_FMT_MOD_SET(RB, rb) |
4978                             AMD_FMT_MOD_SET(PIPE, pipes));
4979         }
4980
4981         /*
4982          * Only supported for 64bpp on Raven, will be filtered on format in
4983          * dm_plane_format_mod_supported.
4984          */
4985         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4986                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4987                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4988                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4989                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4990
4991         if (adev->family == AMDGPU_FAMILY_RV) {
4992                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4993                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4994                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4995                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4996                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4997         }
4998
4999         /*
5000          * Only supported for 64bpp on Raven, will be filtered on format in
5001          * dm_plane_format_mod_supported.
5002          */
5003         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5004                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5005                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5006
5007         if (adev->family == AMDGPU_FAMILY_RV) {
5008                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5009                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5010                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5011         }
5012 }
5013
5014 static void
5015 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5016                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5017 {
5018         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5019
5020         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5021                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5022                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5023                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5024                     AMD_FMT_MOD_SET(DCC, 1) |
5025                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5026                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5027                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5028
5029         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5030                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5031                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5032                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5033                     AMD_FMT_MOD_SET(DCC, 1) |
5034                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5035                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5036                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5037                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5038
5039         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5040                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5041                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5042                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5043
5044         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5045                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5046                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5047                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5048
5050         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5051         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5052                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5053                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5054
5055         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5056                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5057                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5058 }
5059
5060 static void
5061 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5062                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5063 {
5064         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5065         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5066
5067         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5068                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5069                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5070                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5071                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5072                     AMD_FMT_MOD_SET(DCC, 1) |
5073                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5074                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5075                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5076                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5077
5078         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5079                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5080                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5081                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5082                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5083                     AMD_FMT_MOD_SET(DCC, 1) |
5084                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5085                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5086                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5087
5088         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5089                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5090                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5091                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5092                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5093                     AMD_FMT_MOD_SET(DCC, 1) |
5094                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5095                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5096                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5097                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5098                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5099
5100         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5101                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5102                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5103                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5104                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5105                     AMD_FMT_MOD_SET(DCC, 1) |
5106                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5107                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5108                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5109                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5110
5111         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5112                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5113                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5114                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5115                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5116
5117         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5118                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5119                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5120                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5121                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5122
5123         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5124         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5126                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5127
5128         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5129                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5130                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5131 }
5132
5133 static int
5134 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5135 {
        uint64_t size = 0, capacity = 128;

        *mods = NULL;
5138
5139         /* We have not hooked up any pre-GFX9 modifiers. */
5140         if (adev->family < AMDGPU_FAMILY_AI)
5141                 return 0;
5142
        *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
5144
5145         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5146                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5147                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5148                 return *mods ? 0 : -ENOMEM;
5149         }
5150
5151         switch (adev->family) {
5152         case AMDGPU_FAMILY_AI:
5153         case AMDGPU_FAMILY_RV:
5154                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5155                 break;
5156         case AMDGPU_FAMILY_NV:
5157         case AMDGPU_FAMILY_VGH:
5158         case AMDGPU_FAMILY_YC:
5159                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5160                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5161                 else
5162                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5163                 break;
5164         }
5165
5166         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5167
5168         /* INVALID marks the end of the list. */
5169         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5170
5171         if (!*mods)
5172                 return -ENOMEM;
5173
5174         return 0;
5175 }
5176
5177 static int
5178 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5179                                           const struct amdgpu_framebuffer *afb,
5180                                           const enum surface_pixel_format format,
5181                                           const enum dc_rotation_angle rotation,
5182                                           const struct plane_size *plane_size,
5183                                           union dc_tiling_info *tiling_info,
5184                                           struct dc_plane_dcc_param *dcc,
5185                                           struct dc_plane_address *address,
5186                                           const bool force_disable_dcc)
5187 {
5188         const uint64_t modifier = afb->base.modifier;
5189         int ret = 0;
5190
5191         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5192         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5193
5194         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5195                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5196                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5197                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5198
5199                 dcc->enable = 1;
5200                 dcc->meta_pitch = afb->base.pitches[1];
5201                 dcc->independent_64b_blks = independent_64b_blks;
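                /*
                 * Summary of the selection below: on GFX10_RBPLUS tiling,
                 * 64B+128B -> 64b_no_128bcl, 128B only -> 128b, 64B only
                 * -> 64b, neither -> unconstrained; on older tile versions
                 * only the 64B flag is considered.
                 */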
5202                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5203                         if (independent_64b_blks && independent_128b_blks)
5204                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5205                         else if (independent_128b_blks)
5206                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5207                         else if (independent_64b_blks && !independent_128b_blks)
5208                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5209                         else
5210                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5211                 } else {
5212                         if (independent_64b_blks)
5213                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5214                         else
5215                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5216                 }
5217
5218                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5219                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5220         }
5221
5222         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5223         if (ret)
5224                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5225
5226         return ret;
5227 }
5228
5229 static int
5230 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5231                              const struct amdgpu_framebuffer *afb,
5232                              const enum surface_pixel_format format,
5233                              const enum dc_rotation_angle rotation,
5234                              const uint64_t tiling_flags,
5235                              union dc_tiling_info *tiling_info,
5236                              struct plane_size *plane_size,
5237                              struct dc_plane_dcc_param *dcc,
5238                              struct dc_plane_address *address,
5239                              bool tmz_surface,
5240                              bool force_disable_dcc)
5241 {
5242         const struct drm_framebuffer *fb = &afb->base;
5243         int ret;
5244
5245         memset(tiling_info, 0, sizeof(*tiling_info));
5246         memset(plane_size, 0, sizeof(*plane_size));
5247         memset(dcc, 0, sizeof(*dcc));
5248         memset(address, 0, sizeof(*address));
5249
5250         address->tmz_surface = tmz_surface;
5251
5252         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5253                 uint64_t addr = afb->address + fb->offsets[0];
5254
5255                 plane_size->surface_size.x = 0;
5256                 plane_size->surface_size.y = 0;
5257                 plane_size->surface_size.width = fb->width;
5258                 plane_size->surface_size.height = fb->height;
5259                 plane_size->surface_pitch =
5260                         fb->pitches[0] / fb->format->cpp[0];
5261
5262                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5263                 address->grph.addr.low_part = lower_32_bits(addr);
5264                 address->grph.addr.high_part = upper_32_bits(addr);
5265         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5266                 uint64_t luma_addr = afb->address + fb->offsets[0];
5267                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5268
5269                 plane_size->surface_size.x = 0;
5270                 plane_size->surface_size.y = 0;
5271                 plane_size->surface_size.width = fb->width;
5272                 plane_size->surface_size.height = fb->height;
5273                 plane_size->surface_pitch =
5274                         fb->pitches[0] / fb->format->cpp[0];
5275
5276                 plane_size->chroma_size.x = 0;
5277                 plane_size->chroma_size.y = 0;
5278                 /* TODO: set these based on surface format */
5279                 plane_size->chroma_size.width = fb->width / 2;
5280                 plane_size->chroma_size.height = fb->height / 2;
5281
5282                 plane_size->chroma_pitch =
5283                         fb->pitches[1] / fb->format->cpp[1];
5284
5285                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5286                 address->video_progressive.luma_addr.low_part =
5287                         lower_32_bits(luma_addr);
5288                 address->video_progressive.luma_addr.high_part =
5289                         upper_32_bits(luma_addr);
5290                 address->video_progressive.chroma_addr.low_part =
5291                         lower_32_bits(chroma_addr);
5292                 address->video_progressive.chroma_addr.high_part =
5293                         upper_32_bits(chroma_addr);
5294         }
5295
5296         if (adev->family >= AMDGPU_FAMILY_AI) {
5297                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5298                                                                 rotation, plane_size,
5299                                                                 tiling_info, dcc,
5300                                                                 address,
5301                                                                 force_disable_dcc);
5302                 if (ret)
5303                         return ret;
5304         } else {
5305                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5306         }
5307
5308         return 0;
5309 }
5310
5311 static void
5312 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5313                                bool *per_pixel_alpha, bool *global_alpha,
5314                                int *global_alpha_value)
5315 {
5316         *per_pixel_alpha = false;
5317         *global_alpha = false;
5318         *global_alpha_value = 0xff;
5319
5320         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5321                 return;
5322
5323         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5324                 static const uint32_t alpha_formats[] = {
5325                         DRM_FORMAT_ARGB8888,
5326                         DRM_FORMAT_RGBA8888,
5327                         DRM_FORMAT_ABGR8888,
5328                 };
5329                 uint32_t format = plane_state->fb->format->format;
5330                 unsigned int i;
5331
5332                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5333                         if (format == alpha_formats[i]) {
5334                                 *per_pixel_alpha = true;
5335                                 break;
5336                         }
5337                 }
5338         }
5339
5340         if (plane_state->alpha < 0xffff) {
5341                 *global_alpha = true;
5342                 *global_alpha_value = plane_state->alpha >> 8;
5343         }
5344 }
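
/*
 * Example for the global alpha path above (illustrative only): a DRM
 * plane "alpha" of 0x8000 (~50% of the 16-bit range) is below 0xffff,
 * so *global_alpha becomes true and *global_alpha_value becomes
 * 0x8000 >> 8 == 0x80.
 */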
5345
5346 static int
5347 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5348                             const enum surface_pixel_format format,
5349                             enum dc_color_space *color_space)
5350 {
5351         bool full_range;
5352
5353         *color_space = COLOR_SPACE_SRGB;
5354
5355         /* DRM color properties only affect non-RGB formats. */
5356         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5357                 return 0;
5358
5359         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5360
5361         switch (plane_state->color_encoding) {
5362         case DRM_COLOR_YCBCR_BT601:
5363                 if (full_range)
5364                         *color_space = COLOR_SPACE_YCBCR601;
5365                 else
5366                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5367                 break;
5368
5369         case DRM_COLOR_YCBCR_BT709:
5370                 if (full_range)
5371                         *color_space = COLOR_SPACE_YCBCR709;
5372                 else
5373                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5374                 break;
5375
5376         case DRM_COLOR_YCBCR_BT2020:
5377                 if (full_range)
5378                         *color_space = COLOR_SPACE_2020_YCBCR;
5379                 else
5380                         return -EINVAL;
5381                 break;
5382
5383         default:
5384                 return -EINVAL;
5385         }
5386
5387         return 0;
5388 }
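/*
 * Example (illustrative): an NV12 plane with DRM_COLOR_YCBCR_BT709 and
 * DRM_COLOR_YCBCR_LIMITED_RANGE maps to COLOR_SPACE_YCBCR709_LIMITED,
 * while an RGB plane returns early with the COLOR_SPACE_SRGB default.
 */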
5389
5390 static int
5391 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5392                             const struct drm_plane_state *plane_state,
5393                             const uint64_t tiling_flags,
5394                             struct dc_plane_info *plane_info,
5395                             struct dc_plane_address *address,
5396                             bool tmz_surface,
5397                             bool force_disable_dcc)
5398 {
5399         const struct drm_framebuffer *fb = plane_state->fb;
5400         const struct amdgpu_framebuffer *afb =
5401                 to_amdgpu_framebuffer(plane_state->fb);
5402         int ret;
5403
5404         memset(plane_info, 0, sizeof(*plane_info));
5405
5406         switch (fb->format->format) {
5407         case DRM_FORMAT_C8:
5408                 plane_info->format =
5409                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5410                 break;
5411         case DRM_FORMAT_RGB565:
5412                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5413                 break;
5414         case DRM_FORMAT_XRGB8888:
5415         case DRM_FORMAT_ARGB8888:
5416                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5417                 break;
5418         case DRM_FORMAT_XRGB2101010:
5419         case DRM_FORMAT_ARGB2101010:
5420                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5421                 break;
5422         case DRM_FORMAT_XBGR2101010:
5423         case DRM_FORMAT_ABGR2101010:
5424                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5425                 break;
5426         case DRM_FORMAT_XBGR8888:
5427         case DRM_FORMAT_ABGR8888:
5428                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5429                 break;
5430         case DRM_FORMAT_NV21:
5431                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5432                 break;
5433         case DRM_FORMAT_NV12:
5434                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5435                 break;
5436         case DRM_FORMAT_P010:
5437                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5438                 break;
5439         case DRM_FORMAT_XRGB16161616F:
5440         case DRM_FORMAT_ARGB16161616F:
5441                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5442                 break;
5443         case DRM_FORMAT_XBGR16161616F:
5444         case DRM_FORMAT_ABGR16161616F:
5445                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5446                 break;
5447         case DRM_FORMAT_XRGB16161616:
5448         case DRM_FORMAT_ARGB16161616:
5449                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5450                 break;
5451         case DRM_FORMAT_XBGR16161616:
5452         case DRM_FORMAT_ABGR16161616:
5453                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5454                 break;
5455         default:
5456                 DRM_ERROR(
5457                         "Unsupported screen format %p4cc\n",
5458                         &fb->format->format);
5459                 return -EINVAL;
5460         }
5461
5462         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5463         case DRM_MODE_ROTATE_0:
5464                 plane_info->rotation = ROTATION_ANGLE_0;
5465                 break;
5466         case DRM_MODE_ROTATE_90:
5467                 plane_info->rotation = ROTATION_ANGLE_90;
5468                 break;
5469         case DRM_MODE_ROTATE_180:
5470                 plane_info->rotation = ROTATION_ANGLE_180;
5471                 break;
5472         case DRM_MODE_ROTATE_270:
5473                 plane_info->rotation = ROTATION_ANGLE_270;
5474                 break;
5475         default:
5476                 plane_info->rotation = ROTATION_ANGLE_0;
5477                 break;
5478         }
5479
5480         plane_info->visible = true;
5481         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5482
5483         plane_info->layer_index = 0;
5484
5485         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5486                                           &plane_info->color_space);
5487         if (ret)
5488                 return ret;
5489
5490         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5491                                            plane_info->rotation, tiling_flags,
5492                                            &plane_info->tiling_info,
5493                                            &plane_info->plane_size,
5494                                            &plane_info->dcc, address, tmz_surface,
5495                                            force_disable_dcc);
5496         if (ret)
5497                 return ret;
5498
5499         fill_blending_from_plane_state(
5500                 plane_state, &plane_info->per_pixel_alpha,
5501                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5502
5503         return 0;
5504 }
5505
5506 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5507                                     struct dc_plane_state *dc_plane_state,
5508                                     struct drm_plane_state *plane_state,
5509                                     struct drm_crtc_state *crtc_state)
5510 {
5511         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5512         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5513         struct dc_scaling_info scaling_info;
5514         struct dc_plane_info plane_info;
5515         int ret;
5516         bool force_disable_dcc = false;
5517
5518         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5519         if (ret)
5520                 return ret;
5521
5522         dc_plane_state->src_rect = scaling_info.src_rect;
5523         dc_plane_state->dst_rect = scaling_info.dst_rect;
5524         dc_plane_state->clip_rect = scaling_info.clip_rect;
5525         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5526
5527         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5528         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5529                                           afb->tiling_flags,
5530                                           &plane_info,
5531                                           &dc_plane_state->address,
5532                                           afb->tmz_surface,
5533                                           force_disable_dcc);
5534         if (ret)
5535                 return ret;
5536
5537         dc_plane_state->format = plane_info.format;
5538         dc_plane_state->color_space = plane_info.color_space;
5540         dc_plane_state->plane_size = plane_info.plane_size;
5541         dc_plane_state->rotation = plane_info.rotation;
5542         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5543         dc_plane_state->stereo_format = plane_info.stereo_format;
5544         dc_plane_state->tiling_info = plane_info.tiling_info;
5545         dc_plane_state->visible = plane_info.visible;
5546         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5547         dc_plane_state->global_alpha = plane_info.global_alpha;
5548         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5549         dc_plane_state->dcc = plane_info.dcc;
5550         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5551         dc_plane_state->flip_int_enabled = true;
5552
5553         /*
5554          * Always set input transfer function, since plane state is refreshed
5555          * every time.
5556          */
5557         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5558         if (ret)
5559                 return ret;
5560
5561         return 0;
5562 }
5563
5564 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5565                                            const struct dm_connector_state *dm_state,
5566                                            struct dc_stream_state *stream)
5567 {
5568         enum amdgpu_rmx_type rmx_type;
5569
5570         struct rect src = { 0 }; /* viewport in composition space */
5571         struct rect dst = { 0 }; /* stream addressable area */
5572
5573         /* no mode. nothing to be done */
5574         if (!mode)
5575                 return;
5576
5577         /* Full screen scaling by default */
5578         src.width = mode->hdisplay;
5579         src.height = mode->vdisplay;
5580         dst.width = stream->timing.h_addressable;
5581         dst.height = stream->timing.v_addressable;
5582
5583         if (dm_state) {
5584                 rmx_type = dm_state->scaling;
5585                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5586                         if (src.width * dst.height <
5587                                         src.height * dst.width) {
5588                                 /* height needs less upscaling/more downscaling */
5589                                 dst.width = src.width *
5590                                                 dst.height / src.height;
5591                         } else {
5592                                 /* width needs less upscaling/more downscaling */
5593                                 dst.height = src.height *
5594                                                 dst.width / src.width;
5595                         }
5596                 } else if (rmx_type == RMX_CENTER) {
5597                         dst = src;
5598                 }
5599
5600                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5601                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5602
5603                 if (dm_state->underscan_enable) {
5604                         dst.x += dm_state->underscan_hborder / 2;
5605                         dst.y += dm_state->underscan_vborder / 2;
5606                         dst.width -= dm_state->underscan_hborder;
5607                         dst.height -= dm_state->underscan_vborder;
5608                 }
5609         }
5610
5611         stream->src = src;
5612         stream->dst = dst;
5613
5614         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5615                       dst.x, dst.y, dst.width, dst.height);
5616
5617 }
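/*
 * Worked example (assumed numbers): RMX_ASPECT with a 1920x1080 source on
 * a 2560x1600 timing. Since 1920 * 1600 >= 1080 * 2560, the width needs
 * less upscaling, so dst.height = 1080 * 2560 / 1920 = 1440 and the image
 * is centered at dst.y = (1600 - 1440) / 2 = 80.
 */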
5618
5619 static enum dc_color_depth
5620 convert_color_depth_from_display_info(const struct drm_connector *connector,
5621                                       bool is_y420, int requested_bpc)
5622 {
5623         uint8_t bpc;
5624
5625         if (is_y420) {
5626                 bpc = 8;
5627
5628                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5629                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5630                         bpc = 16;
5631                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5632                         bpc = 12;
5633                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5634                         bpc = 10;
5635         } else {
5636                 bpc = (uint8_t)connector->display_info.bpc;
5637                 /* Assume 8 bpc by default if no bpc is specified. */
5638                 bpc = bpc ? bpc : 8;
5639         }
5640
5641         if (requested_bpc > 0) {
5642                 /*
5643                  * Cap display bpc based on the user requested value.
5644                  *
5645                  * The value for state->max_bpc may not be correctly updated
5646                  * depending on when the connector gets added to the state
5647                  * or if this was called outside of atomic check, so it
5648                  * can't be used directly.
5649                  */
5650                 bpc = min_t(u8, bpc, requested_bpc);
5651
5652                 /* Round down to the nearest even number. */
5653                 bpc = bpc - (bpc & 1);
5654         }
5655
5656         switch (bpc) {
5657         case 0:
5658                 /*
5659                  * Temporary workaround: DRM doesn't parse color depth for
5660                  * EDID revisions before 1.4.
5661                  * TODO: Fix EDID parsing.
5662                  */
5663                 return COLOR_DEPTH_888;
5664         case 6:
5665                 return COLOR_DEPTH_666;
5666         case 8:
5667                 return COLOR_DEPTH_888;
5668         case 10:
5669                 return COLOR_DEPTH_101010;
5670         case 12:
5671                 return COLOR_DEPTH_121212;
5672         case 14:
5673                 return COLOR_DEPTH_141414;
5674         case 16:
5675                 return COLOR_DEPTH_161616;
5676         default:
5677                 return COLOR_DEPTH_UNDEFINED;
5678         }
5679 }
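/*
 * Example (illustrative): a 12 bpc panel with a "max bpc" property of 10
 * gives bpc = min(12, 10) = 10; an odd request such as 11 is rounded down
 * to the nearest even value, 11 - (11 & 1) = 10, before the switch above
 * maps it to COLOR_DEPTH_101010.
 */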
5680
5681 static enum dc_aspect_ratio
5682 get_aspect_ratio(const struct drm_display_mode *mode_in)
5683 {
5684         /* 1-1 mapping, since both enums follow the HDMI spec. */
5685         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5686 }
5687
5688 static enum dc_color_space
5689 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5690 {
5691         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5692
5693         switch (dc_crtc_timing->pixel_encoding) {
5694         case PIXEL_ENCODING_YCBCR422:
5695         case PIXEL_ENCODING_YCBCR444:
5696         case PIXEL_ENCODING_YCBCR420:
5697         {
5698                 /*
5699                  * 27030 kHz is the separation point between HDTV and SDTV
5700                  * according to the HDMI spec; use YCbCr709 above it and
5701                  * YCbCr601 below it.
5702                  */
5703                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5704                         if (dc_crtc_timing->flags.Y_ONLY)
5705                                 color_space =
5706                                         COLOR_SPACE_YCBCR709_LIMITED;
5707                         else
5708                                 color_space = COLOR_SPACE_YCBCR709;
5709                 } else {
5710                         if (dc_crtc_timing->flags.Y_ONLY)
5711                                 color_space =
5712                                         COLOR_SPACE_YCBCR601_LIMITED;
5713                         else
5714                                 color_space = COLOR_SPACE_YCBCR601;
5715                 }
5716
5717         }
5718         break;
5719         case PIXEL_ENCODING_RGB:
5720                 color_space = COLOR_SPACE_SRGB;
5721                 break;
5722
5723         default:
5724                 WARN_ON(1);
5725                 break;
5726         }
5727
5728         return color_space;
5729 }
5730
5731 static bool adjust_colour_depth_from_display_info(
5732         struct dc_crtc_timing *timing_out,
5733         const struct drm_display_info *info)
5734 {
5735         enum dc_color_depth depth = timing_out->display_color_depth;
5736         int normalized_clk;
5737         do {
5738                 normalized_clk = timing_out->pix_clk_100hz / 10;
5739                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5740                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5741                         normalized_clk /= 2;
5742                 /* Adjust the pixel clock per the HDMI spec for the given colour depth */
5743                 switch (depth) {
5744                 case COLOR_DEPTH_888:
5745                         break;
5746                 case COLOR_DEPTH_101010:
5747                         normalized_clk = (normalized_clk * 30) / 24;
5748                         break;
5749                 case COLOR_DEPTH_121212:
5750                         normalized_clk = (normalized_clk * 36) / 24;
5751                         break;
5752                 case COLOR_DEPTH_161616:
5753                         normalized_clk = (normalized_clk * 48) / 24;
5754                         break;
5755                 default:
5756                         /* The above depths are the only ones valid for HDMI. */
5757                         return false;
5758                 }
5759                 if (normalized_clk <= info->max_tmds_clock) {
5760                         timing_out->display_color_depth = depth;
5761                         return true;
5762                 }
5763         } while (--depth > COLOR_DEPTH_666);
5764         return false;
5765 }
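/*
 * Worked example (assumed numbers): 4k60 has pix_clk_100hz = 5940000, so
 * normalized_clk starts at 594000 kHz; YCbCr 4:2:0 halves it to 297000,
 * and COLOR_DEPTH_101010 scales it to 297000 * 30 / 24 = 371250 kHz. With
 * a 340000 kHz max_tmds_clock that fails, so the loop retries at 8 bpc,
 * where 297000 kHz fits.
 */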
5766
5767 static void fill_stream_properties_from_drm_display_mode(
5768         struct dc_stream_state *stream,
5769         const struct drm_display_mode *mode_in,
5770         const struct drm_connector *connector,
5771         const struct drm_connector_state *connector_state,
5772         const struct dc_stream_state *old_stream,
5773         int requested_bpc)
5774 {
5775         struct dc_crtc_timing *timing_out = &stream->timing;
5776         const struct drm_display_info *info = &connector->display_info;
5777         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5778         struct hdmi_vendor_infoframe hv_frame;
5779         struct hdmi_avi_infoframe avi_frame;
5780
5781         memset(&hv_frame, 0, sizeof(hv_frame));
5782         memset(&avi_frame, 0, sizeof(avi_frame));
5783
5784         timing_out->h_border_left = 0;
5785         timing_out->h_border_right = 0;
5786         timing_out->v_border_top = 0;
5787         timing_out->v_border_bottom = 0;
5788         /* TODO: un-hardcode */
5789         if (drm_mode_is_420_only(info, mode_in)
5790                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5791                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5792         else if (drm_mode_is_420_also(info, mode_in)
5793                         && aconnector->force_yuv420_output)
5794                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5795         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5796                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5797                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5798         else
5799                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5800
5801         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5802         timing_out->display_color_depth = convert_color_depth_from_display_info(
5803                 connector,
5804                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5805                 requested_bpc);
5806         timing_out->scan_type = SCANNING_TYPE_NODATA;
5807         timing_out->hdmi_vic = 0;
5808
5809         if (old_stream) {
5810                 timing_out->vic = old_stream->timing.vic;
5811                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5812                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5813         } else {
5814                 timing_out->vic = drm_match_cea_mode(mode_in);
5815                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5816                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5817                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5818                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5819         }
5820
5821         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5822                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5823                 timing_out->vic = avi_frame.video_code;
5824                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5825                 timing_out->hdmi_vic = hv_frame.vic;
5826         }
5827
5828         if (is_freesync_video_mode(mode_in, aconnector)) {
5829                 timing_out->h_addressable = mode_in->hdisplay;
5830                 timing_out->h_total = mode_in->htotal;
5831                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5832                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5833                 timing_out->v_total = mode_in->vtotal;
5834                 timing_out->v_addressable = mode_in->vdisplay;
5835                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5836                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5837                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5838         } else {
5839                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5840                 timing_out->h_total = mode_in->crtc_htotal;
5841                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5842                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5843                 timing_out->v_total = mode_in->crtc_vtotal;
5844                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5845                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5846                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5847                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5848         }
5849
5850         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5851
5852         stream->output_color_space = get_output_color_space(timing_out);
5853
5854         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5855         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5856         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5857                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5858                     drm_mode_is_420_also(info, mode_in) &&
5859                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5860                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5861                         adjust_colour_depth_from_display_info(timing_out, info);
5862                 }
5863         }
5864 }
5865
5866 static void fill_audio_info(struct audio_info *audio_info,
5867                             const struct drm_connector *drm_connector,
5868                             const struct dc_sink *dc_sink)
5869 {
5870         int i = 0;
5871         int cea_revision = 0;
5872         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5873
5874         audio_info->manufacture_id = edid_caps->manufacturer_id;
5875         audio_info->product_id = edid_caps->product_id;
5876
5877         cea_revision = drm_connector->display_info.cea_rev;
5878
5879         strscpy(audio_info->display_name,
5880                 edid_caps->display_name,
5881                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5882
5883         if (cea_revision >= 3) {
5884                 audio_info->mode_count = edid_caps->audio_mode_count;
5885
5886                 for (i = 0; i < audio_info->mode_count; ++i) {
5887                         audio_info->modes[i].format_code =
5888                                         (enum audio_format_code)
5889                                         (edid_caps->audio_modes[i].format_code);
5890                         audio_info->modes[i].channel_count =
5891                                         edid_caps->audio_modes[i].channel_count;
5892                         audio_info->modes[i].sample_rates.all =
5893                                         edid_caps->audio_modes[i].sample_rate;
5894                         audio_info->modes[i].sample_size =
5895                                         edid_caps->audio_modes[i].sample_size;
5896                 }
5897         }
5898
5899         audio_info->flags.all = edid_caps->speaker_flags;
5900
5901         /* TODO: We only check the progressive mode; check the interlaced mode too */
5902         if (drm_connector->latency_present[0]) {
5903                 audio_info->video_latency = drm_connector->video_latency[0];
5904                 audio_info->audio_latency = drm_connector->audio_latency[0];
5905         }
5906
5907         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5908
5909 }
5910
5911 static void
5912 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5913                                       struct drm_display_mode *dst_mode)
5914 {
5915         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5916         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5917         dst_mode->crtc_clock = src_mode->crtc_clock;
5918         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5919         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5920         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5921         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5922         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5923         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5924         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5925         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5926         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5927         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5928         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5929 }
5930
5931 static void
5932 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5933                                         const struct drm_display_mode *native_mode,
5934                                         bool scale_enabled)
5935 {
5936         if (scale_enabled) {
5937                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5938         } else if (native_mode->clock == drm_mode->clock &&
5939                         native_mode->htotal == drm_mode->htotal &&
5940                         native_mode->vtotal == drm_mode->vtotal) {
5941                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5942         } else {
5943                 /* neither scaling nor an amdgpu-inserted mode: nothing to patch */
5944         }
5945 }
5946
5947 static struct dc_sink *
5948 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5949 {
5950         struct dc_sink_init_data sink_init_data = { 0 };
5951         struct dc_sink *sink = NULL;
5952         sink_init_data.link = aconnector->dc_link;
5953         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5954
5955         sink = dc_sink_create(&sink_init_data);
5956         if (!sink) {
5957                 DRM_ERROR("Failed to create sink!\n");
5958                 return NULL;
5959         }
5960         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5961
5962         return sink;
5963 }
5964
5965 static void set_multisync_trigger_params(
5966                 struct dc_stream_state *stream)
5967 {
5968         struct dc_stream_state *master = NULL;
5969
5970         if (stream->triggered_crtc_reset.enabled) {
5971                 master = stream->triggered_crtc_reset.event_source;
5972                 stream->triggered_crtc_reset.event =
5973                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5974                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5975                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5976         }
5977 }
5978
5979 static void set_master_stream(struct dc_stream_state *stream_set[],
5980                               int stream_count)
5981 {
5982         int j, highest_rfr = 0, master_stream = 0;
5983
5984         for (j = 0;  j < stream_count; j++) {
5985                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5986                         int refresh_rate = 0;
5987
5988                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5989                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5990                         if (refresh_rate > highest_rfr) {
5991                                 highest_rfr = refresh_rate;
5992                                 master_stream = j;
5993                         }
5994                 }
5995         }
5996         for (j = 0;  j < stream_count; j++) {
5997                 if (stream_set[j])
5998                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5999         }
6000 }
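/*
 * Example (illustrative numbers): a 1920x1080@60 stream with
 * pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125 gives
 * refresh_rate = (1485000 * 100) / (2200 * 1125) = 60, so it wins the
 * master election above over a 30 Hz sibling.
 */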
6001
6002 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6003 {
6004         int i = 0;
6005         struct dc_stream_state *stream;
6006
6007         if (context->stream_count < 2)
6008                 return;
6009         for (i = 0; i < context->stream_count ; i++) {
6010                 if (!context->streams[i])
6011                         continue;
6012                 /*
6013                  * TODO: add a function to read AMD VSDB bits and set
6014                  * crtc_sync_master.multi_sync_enabled flag
6015                  * For now it's set to false
6016                  */
6017         }
6018
6019         set_master_stream(context->streams, context->stream_count);
6020
6021         for (i = 0; i < context->stream_count ; i++) {
6022                 stream = context->streams[i];
6023
6024                 if (!stream)
6025                         continue;
6026
6027                 set_multisync_trigger_params(stream);
6028         }
6029 }
6030
6031 #if defined(CONFIG_DRM_AMD_DC_DCN)
6032 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6033                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6034                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6035 {
6036         stream->timing.flags.DSC = 0;
6037
6038         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6039                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6040                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6041                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6042                                       dsc_caps);
6043         }
6044 }
6045
6046 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6047                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6048                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6049 {
6050         struct drm_connector *drm_connector = &aconnector->base;
6051         uint32_t link_bandwidth_kbps;
6052         uint32_t max_dsc_target_bpp_limit_override = 0;
6053
6054         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6055                                                         dc_link_get_link_cap(aconnector->dc_link));
6056
6057         if (stream->link && stream->link->local_sink)
6058                 max_dsc_target_bpp_limit_override =
6059                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6060
6061         /* Set DSC policy according to dsc_clock_en */
6062         dc_dsc_policy_set_enable_dsc_when_not_needed(
6063                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6064
6065         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6066
6067                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6068                                                 dsc_caps,
6069                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6070                                                 max_dsc_target_bpp_limit_override,
6071                                                 link_bandwidth_kbps,
6072                                                 &stream->timing,
6073                                                 &stream->timing.dsc_cfg)) {
6074                         stream->timing.flags.DSC = 1;
6075                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6076                 }
6077         }
6078
6079         /* Overwrite the stream flag if DSC is enabled through debugfs */
6080         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6081                 stream->timing.flags.DSC = 1;
6082
6083         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6084                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6085
6086         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6087                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6088
6089         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6090                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6091 }
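/*
 * Example (illustrative, standard DP math): a 4-lane HBR2 link carries
 * 4 * 5400000 * 8 / 10 = 17280000 kbps after 8b/10b coding; DSC is only
 * enabled when dc_dsc_compute_config() finds a configuration that fits
 * the timing within that link_bandwidth_kbps budget.
 */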
6092 #endif /* CONFIG_DRM_AMD_DC_DCN */
6093
6094 /**
6095  * DOC: FreeSync Video
6096  *
6097  * When a userspace application wants to play a video, the content follows a
6098  * standard format definition that usually specifies the FPS for that format.
6099  * The below list illustrates some video format and the expected FPS,
6100  * The list below illustrates some video formats and the FPS expected
6101  * for each:
6102  * - TV/NTSC (23.976 FPS)
6103  * - Cinema (24 FPS)
6104  * - TV/PAL (25 FPS)
6105  * - TV/NTSC (29.97 FPS)
6106  * - TV/NTSC (30 FPS)
6107  * - Cinema HFR (48 FPS)
6108  * - TV/PAL (50 FPS)
6109  * - Commonly used (60 FPS)
6110  * - Multiples of 24 (48,72,96,120 FPS)
6111  *
6112  * The list of standard video formats is not huge, so those modes can be
6113  * added to the connector mode list beforehand. With that, userspace can
6114  * leverage FreeSync to extend the front porch and attain the target refresh
6115  * rate. Such a switch will happen seamlessly, without screen blanking or
6116  * reprogramming of the output in any other way. If the userspace requests a
6117  * modesetting change compatible with FreeSync modes that only differ in the
6118  * refresh rate, DC will skip the full update and avoid a blink during the
6119  * transition. For example, the video player can change the modesetting from
6120  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6121  * causing any display blink. This same concept can be applied to a mode
6122  * setting change.
6123  */
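/*
 * Example (editorial sketch): with htotal = 2200 and a fixed 148500 kHz
 * pixel clock, a 1125-line vtotal yields 60 Hz; stretching the vertical
 * front porch so that vtotal = 1406 yields 148500000 / (2200 * 1406)
 * ~= 48 Hz, matching 48 FPS content without a full mode set.
 */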
6124 static struct drm_display_mode *
6125 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6126                           bool use_probed_modes)
6127 {
6128         struct drm_display_mode *m, *m_pref = NULL;
6129         u16 current_refresh, highest_refresh;
6130         struct list_head *list_head = use_probed_modes ?
6131                                                     &aconnector->base.probed_modes :
6132                                                     &aconnector->base.modes;
6133
6134         if (aconnector->freesync_vid_base.clock != 0)
6135                 return &aconnector->freesync_vid_base;
6136
6137         /* Find the preferred mode */
6138         list_for_each_entry (m, list_head, head) {
6139                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6140                         m_pref = m;
6141                         break;
6142                 }
6143         }
6144
6145         if (!m_pref) {
6146                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6147                 m_pref = list_first_entry_or_null(
6148                         &aconnector->base.modes, struct drm_display_mode, head);
6149                 if (!m_pref) {
6150                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6151                         return NULL;
6152                 }
6153         }
6154
6155         highest_refresh = drm_mode_vrefresh(m_pref);
6156
6157         /*
6158          * Find the mode with the highest refresh rate at the same resolution.
6159          * For some monitors, the preferred mode is not the mode with the
6160          * highest supported refresh rate.
6161          */
6162         list_for_each_entry (m, list_head, head) {
6163                 current_refresh  = drm_mode_vrefresh(m);
6164
6165                 if (m->hdisplay == m_pref->hdisplay &&
6166                     m->vdisplay == m_pref->vdisplay &&
6167                     highest_refresh < current_refresh) {
6168                         highest_refresh = current_refresh;
6169                         m_pref = m;
6170                 }
6171         }
6172
6173         aconnector->freesync_vid_base = *m_pref;
6174         return m_pref;
6175 }
6176
6177 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6178                                    struct amdgpu_dm_connector *aconnector)
6179 {
6180         struct drm_display_mode *high_mode;
6181         int timing_diff;
6182
6183         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6184         if (!high_mode || !mode)
6185                 return false;
6186
6187         timing_diff = high_mode->vtotal - mode->vtotal;
6188
6189         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6190             high_mode->hdisplay != mode->hdisplay ||
6191             high_mode->vdisplay != mode->vdisplay ||
6192             high_mode->hsync_start != mode->hsync_start ||
6193             high_mode->hsync_end != mode->hsync_end ||
6194             high_mode->htotal != mode->htotal ||
6195             high_mode->hskew != mode->hskew ||
6196             high_mode->vscan != mode->vscan ||
6197             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6198             high_mode->vsync_end - mode->vsync_end != timing_diff)
6199                 return false;
6200         else
6201                 return true;
6202 }
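/*
 * Example (assumed numbers): a 48 Hz variant (vtotal = 1406) of a 60 Hz
 * base mode (vtotal = 1125) differs only vertically, so
 * timing_diff = 1125 - 1406 = -281, and both vsync_start and vsync_end
 * must be shifted by those same 281 lines for the mode to qualify as a
 * FreeSync video mode.
 */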
6203
6204 static struct dc_stream_state *
6205 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6206                        const struct drm_display_mode *drm_mode,
6207                        const struct dm_connector_state *dm_state,
6208                        const struct dc_stream_state *old_stream,
6209                        int requested_bpc)
6210 {
6211         struct drm_display_mode *preferred_mode = NULL;
6212         struct drm_connector *drm_connector;
6213         const struct drm_connector_state *con_state =
6214                 dm_state ? &dm_state->base : NULL;
6215         struct dc_stream_state *stream = NULL;
6216         struct drm_display_mode mode = *drm_mode;
6217         struct drm_display_mode saved_mode;
6218         struct drm_display_mode *freesync_mode = NULL;
6219         bool native_mode_found = false;
6220         bool recalculate_timing = false;
6221         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6222         int mode_refresh;
6223         int preferred_refresh = 0;
6224 #if defined(CONFIG_DRM_AMD_DC_DCN)
6225         struct dsc_dec_dpcd_caps dsc_caps;
6226 #endif
6227         struct dc_sink *sink = NULL;
6228
6229         memset(&saved_mode, 0, sizeof(saved_mode));
6230
6231         if (aconnector == NULL) {
6232                 DRM_ERROR("aconnector is NULL!\n");
6233                 return stream;
6234         }
6235
6236         drm_connector = &aconnector->base;
6237
6238         if (!aconnector->dc_sink) {
6239                 sink = create_fake_sink(aconnector);
6240                 if (!sink)
6241                         return stream;
6242         } else {
6243                 sink = aconnector->dc_sink;
6244                 dc_sink_retain(sink);
6245         }
6246
6247         stream = dc_create_stream_for_sink(sink);
6248
6249         if (stream == NULL) {
6250                 DRM_ERROR("Failed to create stream for sink!\n");
6251                 goto finish;
6252         }
6253
6254         stream->dm_stream_context = aconnector;
6255
6256         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6257                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6258
6259         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6260                 /* Search for preferred mode */
6261                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6262                         native_mode_found = true;
6263                         break;
6264                 }
6265         }
6266         if (!native_mode_found)
6267                 preferred_mode = list_first_entry_or_null(
6268                                 &aconnector->base.modes,
6269                                 struct drm_display_mode,
6270                                 head);
6271
6272         mode_refresh = drm_mode_vrefresh(&mode);
6273
6274         if (preferred_mode == NULL) {
6275                 /*
6276                  * This may not be an error; the use case is when we have no
6277                  * usermode calls to reset and set mode upon hotplug. In this
6278                  * case, we call set mode ourselves to restore the previous mode,
6279                  * and the mode list may not have been filled in yet.
6280                  */
6281                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6282         } else {
6283                 recalculate_timing = amdgpu_freesync_vid_mode &&
6284                                  is_freesync_video_mode(&mode, aconnector);
6285                 if (recalculate_timing) {
6286                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6287                         saved_mode = mode;
6288                         mode = *freesync_mode;
6289                 } else {
6290                         decide_crtc_timing_for_drm_display_mode(
6291                                 &mode, preferred_mode, scale);
6292
6293                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6294                 }
6295         }
6296
6297         if (recalculate_timing)
6298                 drm_mode_set_crtcinfo(&saved_mode, 0);
6299         else if (!dm_state)
6300                 drm_mode_set_crtcinfo(&mode, 0);
6301
6302         /*
6303          * If scaling is enabled and the refresh rate didn't change,
6304          * copy the vic and polarities from the old timings.
6305          */
6306         if (!scale || mode_refresh != preferred_refresh)
6307                 fill_stream_properties_from_drm_display_mode(
6308                         stream, &mode, &aconnector->base, con_state, NULL,
6309                         requested_bpc);
6310         else
6311                 fill_stream_properties_from_drm_display_mode(
6312                         stream, &mode, &aconnector->base, con_state, old_stream,
6313                         requested_bpc);
6314
6315 #if defined(CONFIG_DRM_AMD_DC_DCN)
6316         /* SST DSC determination policy */
6317         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6318         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6319                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6320 #endif
6321
6322         update_stream_scaling_settings(&mode, dm_state, stream);
6323
6324         fill_audio_info(
6325                 &stream->audio_info,
6326                 drm_connector,
6327                 sink);
6328
6329         update_stream_signal(stream, sink);
6330
6331         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6332                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6333
6334         if (stream->link->psr_settings.psr_feature_enabled) {
6335                 /*
6336                  * Decide whether the stream supports VSC SDP colorimetry
6337                  * before building the VSC info packet.
6338                  */
6339                 stream->use_vsc_sdp_for_colorimetry = false;
6340                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6341                         stream->use_vsc_sdp_for_colorimetry =
6342                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6343                 } else {
6344                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6345                                 stream->use_vsc_sdp_for_colorimetry = true;
6346                 }
6347                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6348                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6349
6350         }
6351 finish:
6352         dc_sink_release(sink);
6353
6354         return stream;
6355 }
6356
6357 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6358 {
6359         drm_crtc_cleanup(crtc);
6360         kfree(crtc);
6361 }
6362
6363 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6364                                   struct drm_crtc_state *state)
6365 {
6366         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6367
6368         /* TODO: Destroy dc_stream objects when the stream object is flattened */
6369         if (cur->stream)
6370                 dc_stream_release(cur->stream);
6371
6373         __drm_atomic_helper_crtc_destroy_state(state);
6374
6376         kfree(state);
6377 }
6378
6379 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6380 {
6381         struct dm_crtc_state *state;
6382
6383         if (crtc->state)
6384                 dm_crtc_destroy_state(crtc, crtc->state);
6385
6386         state = kzalloc(sizeof(*state), GFP_KERNEL);
6387         if (WARN_ON(!state))
6388                 return;
6389
6390         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6391 }
6392
6393 static struct drm_crtc_state *
6394 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6395 {
6396         struct dm_crtc_state *state, *cur;
6397
6398         if (WARN_ON(!crtc->state))
6399                 return NULL;
6400
6401         cur = to_dm_crtc_state(crtc->state);
6402
6403         state = kzalloc(sizeof(*state), GFP_KERNEL);
6404         if (!state)
6405                 return NULL;
6406
6407         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6408
6409         if (cur->stream) {
6410                 state->stream = cur->stream;
6411                 dc_stream_retain(state->stream);
6412         }
6413
6414         state->active_planes = cur->active_planes;
6415         state->vrr_infopacket = cur->vrr_infopacket;
6416         state->abm_level = cur->abm_level;
6417         state->vrr_supported = cur->vrr_supported;
6418         state->freesync_config = cur->freesync_config;
6419         state->cm_has_degamma = cur->cm_has_degamma;
6420         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6421         state->force_dpms_off = cur->force_dpms_off;
6422         /* TODO: Duplicate dc_stream after the stream object is flattened */
6423
6424         return &state->base;
6425 }
6426
6427 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6428 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6429 {
6430         crtc_debugfs_init(crtc);
6431
6432         return 0;
6433 }
6434 #endif
6435
6436 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6437 {
6438         enum dc_irq_source irq_source;
6439         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6440         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6441         int rc;
6442
6443         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6444
6445         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6446
6447         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6448                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6449         return rc;
6450 }
6451
6452 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6453 {
6454         enum dc_irq_source irq_source;
6455         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6456         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6457         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6458 #if defined(CONFIG_DRM_AMD_DC_DCN)
6459         struct amdgpu_display_manager *dm = &adev->dm;
6460         struct vblank_control_work *work;
6461 #endif
6462         int rc = 0;
6463
6464         if (enable) {
6465                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6466                 if (amdgpu_dm_vrr_active(acrtc_state))
6467                         rc = dm_set_vupdate_irq(crtc, true);
6468         } else {
6469                 /* vblank irq off -> vupdate irq off */
6470                 rc = dm_set_vupdate_irq(crtc, false);
6471         }
6472
6473         if (rc)
6474                 return rc;
6475
6476         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6477
6478         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6479                 return -EBUSY;
6480
6481         if (amdgpu_in_reset(adev))
6482                 return 0;
6483
6484 #if defined(CONFIG_DRM_AMD_DC_DCN)
6485         if (dm->vblank_control_workqueue) {
6486                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6487                 if (!work)
6488                         return -ENOMEM;
6489
6490                 INIT_WORK(&work->work, vblank_control_worker);
6491                 work->dm = dm;
6492                 work->acrtc = acrtc;
6493                 work->enable = enable;
6494
6495                 if (acrtc_state->stream) {
6496                         dc_stream_retain(acrtc_state->stream);
6497                         work->stream = acrtc_state->stream;
6498                 }
6499
6500                 queue_work(dm->vblank_control_workqueue, &work->work);
6501         }
6502 #endif
6503
6504         return 0;
6505 }
6506
6507 static int dm_enable_vblank(struct drm_crtc *crtc)
6508 {
6509         return dm_set_vblank(crtc, true);
6510 }
6511
6512 static void dm_disable_vblank(struct drm_crtc *crtc)
6513 {
6514         dm_set_vblank(crtc, false);
6515 }
6516
6517 /* Only the options currently available to the driver are implemented */
6518 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6519         .reset = dm_crtc_reset_state,
6520         .destroy = amdgpu_dm_crtc_destroy,
6521         .set_config = drm_atomic_helper_set_config,
6522         .page_flip = drm_atomic_helper_page_flip,
6523         .atomic_duplicate_state = dm_crtc_duplicate_state,
6524         .atomic_destroy_state = dm_crtc_destroy_state,
6525         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6526         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6527         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6528         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6529         .enable_vblank = dm_enable_vblank,
6530         .disable_vblank = dm_disable_vblank,
6531         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6532 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6533         .late_register = amdgpu_dm_crtc_late_register,
6534 #endif
6535 };
6536
6537 static enum drm_connector_status
6538 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6539 {
6540         bool connected;
6541         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6542
6543         /*
6544          * Notes:
6545          * 1. This interface is NOT called in context of HPD irq.
6546          * 2. This interface *is called* in the context of a user-mode ioctl,
6547          * which makes it a bad place for *any* MST-related activity.
6548          */
6549
6550         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6551             !aconnector->fake_enable)
6552                 connected = (aconnector->dc_sink != NULL);
6553         else
6554                 connected = (aconnector->base.force == DRM_FORCE_ON);
6555
6556         update_subconnector_property(aconnector);
6557
6558         return (connected ? connector_status_connected :
6559                         connector_status_disconnected);
6560 }
6561
6562 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6563                                             struct drm_connector_state *connector_state,
6564                                             struct drm_property *property,
6565                                             uint64_t val)
6566 {
6567         struct drm_device *dev = connector->dev;
6568         struct amdgpu_device *adev = drm_to_adev(dev);
6569         struct dm_connector_state *dm_old_state =
6570                 to_dm_connector_state(connector->state);
6571         struct dm_connector_state *dm_new_state =
6572                 to_dm_connector_state(connector_state);
6573
6574         int ret = -EINVAL;
6575
6576         if (property == dev->mode_config.scaling_mode_property) {
6577                 enum amdgpu_rmx_type rmx_type;
6578
6579                 switch (val) {
6580                 case DRM_MODE_SCALE_CENTER:
6581                         rmx_type = RMX_CENTER;
6582                         break;
6583                 case DRM_MODE_SCALE_ASPECT:
6584                         rmx_type = RMX_ASPECT;
6585                         break;
6586                 case DRM_MODE_SCALE_FULLSCREEN:
6587                         rmx_type = RMX_FULL;
6588                         break;
6589                 case DRM_MODE_SCALE_NONE:
6590                 default:
6591                         rmx_type = RMX_OFF;
6592                         break;
6593                 }
6594
6595                 if (dm_old_state->scaling == rmx_type)
6596                         return 0;
6597
6598                 dm_new_state->scaling = rmx_type;
6599                 ret = 0;
6600         } else if (property == adev->mode_info.underscan_hborder_property) {
6601                 dm_new_state->underscan_hborder = val;
6602                 ret = 0;
6603         } else if (property == adev->mode_info.underscan_vborder_property) {
6604                 dm_new_state->underscan_vborder = val;
6605                 ret = 0;
6606         } else if (property == adev->mode_info.underscan_property) {
6607                 dm_new_state->underscan_enable = val;
6608                 ret = 0;
6609         } else if (property == adev->mode_info.abm_level_property) {
6610                 dm_new_state->abm_level = val;
6611                 ret = 0;
6612         }
6613
6614         return ret;
6615 }
6616
6617 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6618                                             const struct drm_connector_state *state,
6619                                             struct drm_property *property,
6620                                             uint64_t *val)
6621 {
6622         struct drm_device *dev = connector->dev;
6623         struct amdgpu_device *adev = drm_to_adev(dev);
6624         struct dm_connector_state *dm_state =
6625                 to_dm_connector_state(state);
6626         int ret = -EINVAL;
6627
6628         if (property == dev->mode_config.scaling_mode_property) {
6629                 switch (dm_state->scaling) {
6630                 case RMX_CENTER:
6631                         *val = DRM_MODE_SCALE_CENTER;
6632                         break;
6633                 case RMX_ASPECT:
6634                         *val = DRM_MODE_SCALE_ASPECT;
6635                         break;
6636                 case RMX_FULL:
6637                         *val = DRM_MODE_SCALE_FULLSCREEN;
6638                         break;
6639                 case RMX_OFF:
6640                 default:
6641                         *val = DRM_MODE_SCALE_NONE;
6642                         break;
6643                 }
6644                 ret = 0;
6645         } else if (property == adev->mode_info.underscan_hborder_property) {
6646                 *val = dm_state->underscan_hborder;
6647                 ret = 0;
6648         } else if (property == adev->mode_info.underscan_vborder_property) {
6649                 *val = dm_state->underscan_vborder;
6650                 ret = 0;
6651         } else if (property == adev->mode_info.underscan_property) {
6652                 *val = dm_state->underscan_enable;
6653                 ret = 0;
6654         } else if (property == adev->mode_info.abm_level_property) {
6655                 *val = dm_state->abm_level;
6656                 ret = 0;
6657         }
6658
6659         return ret;
6660 }
6661
6662 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6663 {
6664         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6665
6666         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6667 }
6668
6669 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6670 {
6671         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6672         const struct dc_link *link = aconnector->dc_link;
6673         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6674         struct amdgpu_display_manager *dm = &adev->dm;
6675         int i;
6676
6677         /*
6678          * Call only if mst_mgr was initialized before, since it's not done
6679          * for all connector types.
6680          */
6681         if (aconnector->mst_mgr.dev)
6682                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6683
6684 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6685         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6686         for (i = 0; i < dm->num_of_edps; i++) {
6687                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6688                         backlight_device_unregister(dm->backlight_dev[i]);
6689                         dm->backlight_dev[i] = NULL;
6690                 }
6691         }
6692 #endif
6693
6694         if (aconnector->dc_em_sink)
6695                 dc_sink_release(aconnector->dc_em_sink);
6696         aconnector->dc_em_sink = NULL;
6697         if (aconnector->dc_sink)
6698                 dc_sink_release(aconnector->dc_sink);
6699         aconnector->dc_sink = NULL;
6700
6701         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6702         drm_connector_unregister(connector);
6703         drm_connector_cleanup(connector);
6704         if (aconnector->i2c) {
6705                 i2c_del_adapter(&aconnector->i2c->base);
6706                 kfree(aconnector->i2c);
6707         }
6708         kfree(aconnector->dm_dp_aux.aux.name);
6709
6710         kfree(connector);
6711 }
6712
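/*
 * Destroy the current connector state and replace it with freshly
 * allocated defaults: scaling off, underscan disabled, max 8 bpc
 * requested, and (for eDP panels only) the ABM level seeded from the
 * amdgpu_dm_abm_level module parameter.
 */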
6713 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6714 {
6715         struct dm_connector_state *state =
6716                 to_dm_connector_state(connector->state);
6717
6718         if (connector->state)
6719                 __drm_atomic_helper_connector_destroy_state(connector->state);
6720
6721         kfree(state);
6722
6723         state = kzalloc(sizeof(*state), GFP_KERNEL);
6724
6725         if (state) {
6726                 state->scaling = RMX_OFF;
6727                 state->underscan_enable = false;
6728                 state->underscan_hborder = 0;
6729                 state->underscan_vborder = 0;
6730                 state->base.max_requested_bpc = 8;
6731                 state->vcpi_slots = 0;
6732                 state->pbn = 0;
6733                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6734                         state->abm_level = amdgpu_dm_abm_level;
6735
6736                 __drm_atomic_helper_connector_reset(connector, &state->base);
6737         }
6738 }
6739
6740 struct drm_connector_state *
6741 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6742 {
6743         struct dm_connector_state *state =
6744                 to_dm_connector_state(connector->state);
6745
6746         struct dm_connector_state *new_state =
6747                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6748
6749         if (!new_state)
6750                 return NULL;
6751
6752         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6753
6754         new_state->freesync_capable = state->freesync_capable;
6755         new_state->abm_level = state->abm_level;
6756         new_state->scaling = state->scaling;
6757         new_state->underscan_enable = state->underscan_enable;
6758         new_state->underscan_hborder = state->underscan_hborder;
6759         new_state->underscan_vborder = state->underscan_vborder;
6760         new_state->vcpi_slots = state->vcpi_slots;
6761         new_state->pbn = state->pbn;
6762         return &new_state->base;
6763 }
6764
6765 static int
6766 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6767 {
6768         struct amdgpu_dm_connector *amdgpu_dm_connector =
6769                 to_amdgpu_dm_connector(connector);
6770         int r;
6771
6772         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6773             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6774                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6775                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6776                 if (r)
6777                         return r;
6778         }
6779
6780 #if defined(CONFIG_DEBUG_FS)
6781         connector_debugfs_init(amdgpu_dm_connector);
6782 #endif
6783
6784         return 0;
6785 }
6786
6787 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6788         .reset = amdgpu_dm_connector_funcs_reset,
6789         .detect = amdgpu_dm_connector_detect,
6790         .fill_modes = drm_helper_probe_single_connector_modes,
6791         .destroy = amdgpu_dm_connector_destroy,
6792         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6793         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6794         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6795         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6796         .late_register = amdgpu_dm_connector_late_register,
6797         .early_unregister = amdgpu_dm_connector_unregister
6798 };
6799
6800 static int get_modes(struct drm_connector *connector)
6801 {
6802         return amdgpu_dm_connector_get_modes(connector);
6803 }
6804
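/*
 * Create an emulated (virtual) DC sink from the EDID blob attached to
 * the connector, so that a forced-on connector can be driven without a
 * physical sink answering on the link.
 */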
6805 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6806 {
6807         struct dc_sink_init_data init_params = {
6808                         .link = aconnector->dc_link,
6809                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6810         };
6811         struct edid *edid;
6812
6813         if (!aconnector->base.edid_blob_ptr) {
6814                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6815                                 aconnector->base.name);
6816
6817                 aconnector->base.force = DRM_FORCE_OFF;
6818                 aconnector->base.override_edid = false;
6819                 return;
6820         }
6821
6822         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6823
6824         aconnector->edid = edid;
6825
6826         aconnector->dc_em_sink = dc_link_add_remote_sink(
6827                 aconnector->dc_link,
6828                 (uint8_t *)edid,
6829                 (edid->extensions + 1) * EDID_LENGTH,
6830                 &init_params);
6831
6832         if (aconnector->base.force == DRM_FORCE_ON) {
6833                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6834                                       aconnector->dc_link->local_sink :
6835                                       aconnector->dc_em_sink;
6836                 dc_sink_retain(aconnector->dc_sink);
6837         }
6838 }
6839
6840 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6841 {
6842         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6843
6844         /*
6845          * In case of a headless boot with force on for a DP managed connector,
6846          * those settings have to be != 0 to get an initial modeset.
6847          */
6848         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6849                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6850                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6851         }
6852
6853
6854         aconnector->base.override_edid = true;
6855         create_eml_sink(aconnector);
6856 }
6857
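/*
 * Create a stream for the requested mode and validate it against DC.
 * On validation failure the bpc is lowered in steps of 2 (down to 6)
 * and, if the encoder itself rejected the stream, one more attempt is
 * made with YCbCr420 output forced. Returns the first stream that
 * validates, or NULL.
 */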
6858 static struct dc_stream_state *
6859 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6860                                 const struct drm_display_mode *drm_mode,
6861                                 const struct dm_connector_state *dm_state,
6862                                 const struct dc_stream_state *old_stream)
6863 {
6864         struct drm_connector *connector = &aconnector->base;
6865         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6866         struct dc_stream_state *stream;
6867         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6868         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6869         enum dc_status dc_result = DC_OK;
6870
6871         do {
6872                 stream = create_stream_for_sink(aconnector, drm_mode,
6873                                                 dm_state, old_stream,
6874                                                 requested_bpc);
6875                 if (stream == NULL) {
6876                         DRM_ERROR("Failed to create stream for sink!\n");
6877                         break;
6878                 }
6879
6880                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6881
6882                 if (dc_result != DC_OK) {
6883                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6884                                       drm_mode->hdisplay,
6885                                       drm_mode->vdisplay,
6886                                       drm_mode->clock,
6887                                       dc_result,
6888                                       dc_status_to_str(dc_result));
6889
6890                         dc_stream_release(stream);
6891                         stream = NULL;
6892                         requested_bpc -= 2; /* lower bpc to retry validation */
6893                 }
6894
6895         } while (stream == NULL && requested_bpc >= 6);
6896
6897         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6898                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6899
6900                 aconnector->force_yuv420_output = true;
6901                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6902                                                 dm_state, old_stream);
6903                 aconnector->force_yuv420_output = false;
6904         }
6905
6906         return stream;
6907 }
6908
6909 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6910                                    struct drm_display_mode *mode)
6911 {
6912         int result = MODE_ERROR;
6913         struct dc_sink *dc_sink;
6914         /* TODO: Unhardcode stream count */
6915         struct dc_stream_state *stream;
6916         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6917
6918         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6919                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6920                 return result;
6921
6922         /*
6923          * Only run this the first time mode_valid is called, to initialize
6924          * EDID mgmt.
6925          */
6926         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6927                 !aconnector->dc_em_sink)
6928                 handle_edid_mgmt(aconnector);
6929
6930         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6931
6932         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6933                                 aconnector->base.force != DRM_FORCE_ON) {
6934                 DRM_ERROR("dc_sink is NULL!\n");
6935                 goto fail;
6936         }
6937
6938         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6939         if (stream) {
6940                 dc_stream_release(stream);
6941                 result = MODE_OK;
6942         }
6943
6944 fail:
6945         /* TODO: error handling */
6946         return result;
6947 }
6948
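/*
 * Pack the HDR static metadata from the connector state into a DC info
 * packet. The payload is always the 26-byte CTA-861-G Dynamic Range and
 * Mastering InfoFrame plus a 4-byte header; only the header layout
 * differs between HDMI InfoFrames and DP/eDP SDPs.
 */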
6949 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6950                                 struct dc_info_packet *out)
6951 {
6952         struct hdmi_drm_infoframe frame;
6953         unsigned char buf[30]; /* 26 + 4 */
6954         ssize_t len;
6955         int ret, i;
6956
6957         memset(out, 0, sizeof(*out));
6958
6959         if (!state->hdr_output_metadata)
6960                 return 0;
6961
6962         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6963         if (ret)
6964                 return ret;
6965
6966         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6967         if (len < 0)
6968                 return (int)len;
6969
6970         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6971         if (len != 30)
6972                 return -EINVAL;
6973
6974         /* Prepare the infopacket for DC. */
6975         switch (state->connector->connector_type) {
6976         case DRM_MODE_CONNECTOR_HDMIA:
6977                 out->hb0 = 0x87; /* type */
6978                 out->hb1 = 0x01; /* version */
6979                 out->hb2 = 0x1A; /* length */
6980                 out->sb[0] = buf[3]; /* checksum */
6981                 i = 1;
6982                 break;
6983
6984         case DRM_MODE_CONNECTOR_DisplayPort:
6985         case DRM_MODE_CONNECTOR_eDP:
6986                 out->hb0 = 0x00; /* sdp id, zero */
6987                 out->hb1 = 0x87; /* type */
6988                 out->hb2 = 0x1D; /* payload len - 1 */
6989                 out->hb3 = (0x13 << 2); /* sdp version */
6990                 out->sb[0] = 0x01; /* version */
6991                 out->sb[1] = 0x1A; /* length */
6992                 i = 2;
6993                 break;
6994
6995         default:
6996                 return -EINVAL;
6997         }
6998
6999         memcpy(&out->sb[i], &buf[4], 26);
7000         out->valid = true;
7001
7002         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7003                        sizeof(out->sb), false);
7004
7005         return 0;
7006 }
7007
7008 static int
7009 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7010                                  struct drm_atomic_state *state)
7011 {
7012         struct drm_connector_state *new_con_state =
7013                 drm_atomic_get_new_connector_state(state, conn);
7014         struct drm_connector_state *old_con_state =
7015                 drm_atomic_get_old_connector_state(state, conn);
7016         struct drm_crtc *crtc = new_con_state->crtc;
7017         struct drm_crtc_state *new_crtc_state;
7018         int ret;
7019
7020         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7021
7022         if (!crtc)
7023                 return 0;
7024
7025         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7026                 struct dc_info_packet hdr_infopacket;
7027
7028                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7029                 if (ret)
7030                         return ret;
7031
7032                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7033                 if (IS_ERR(new_crtc_state))
7034                         return PTR_ERR(new_crtc_state);
7035
7036                 /*
7037                  * DC considers the stream backends changed if the
7038                  * static metadata changes. Forcing the modeset also
7039                  * gives a simple way for userspace to switch from
7040                  * 8bpc to 10bpc when setting the metadata to enter
7041                  * or exit HDR.
7042                  *
7043                  * Changing the static metadata after it's been
7044                  * set is permissible, however. So only force a
7045                  * modeset if we're entering or exiting HDR.
7046                  */
7047                 new_crtc_state->mode_changed =
7048                         !old_con_state->hdr_output_metadata ||
7049                         !new_con_state->hdr_output_metadata;
7050         }
7051
7052         return 0;
7053 }
7054
7055 static const struct drm_connector_helper_funcs
7056 amdgpu_dm_connector_helper_funcs = {
7057         /*
7058          * If hotplugging a second, bigger display in FB console mode, bigger resolution
7059          * modes will be filtered by drm_mode_validate_size(), and those modes
7060          * are missing after the user starts lightdm. So we need to renew the modes
7061          * list in the get_modes callback, not just return the modes count.
7062          */
7063         .get_modes = get_modes,
7064         .mode_valid = amdgpu_dm_connector_mode_valid,
7065         .atomic_check = amdgpu_dm_connector_atomic_check,
7066 };
7067
7068 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7069 {
7070 }
7071
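/*
 * Count the planes that will be active on the CRTC after this commit:
 * cursor planes are skipped, planes untouched by this atomic state are
 * assumed to keep their previously validated (enabled) state, and
 * touched planes only count when they carry a framebuffer.
 */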
7072 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7073 {
7074         struct drm_atomic_state *state = new_crtc_state->state;
7075         struct drm_plane *plane;
7076         int num_active = 0;
7077
7078         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7079                 struct drm_plane_state *new_plane_state;
7080
7081                 /* Cursor planes are "fake". */
7082                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7083                         continue;
7084
7085                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7086
7087                 if (!new_plane_state) {
7088                         /*
7089                          * The plane is enabled on the CRTC and hasn't changed
7090                          * state. This means that it previously passed
7091                          * validation and is therefore enabled.
7092                          */
7093                         num_active += 1;
7094                         continue;
7095                 }
7096
7097                 /* We need a framebuffer to be considered enabled. */
7098                 num_active += (new_plane_state->fb != NULL);
7099         }
7100
7101         return num_active;
7102 }
7103
7104 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7105                                          struct drm_crtc_state *new_crtc_state)
7106 {
7107         struct dm_crtc_state *dm_new_crtc_state =
7108                 to_dm_crtc_state(new_crtc_state);
7109
7110         dm_new_crtc_state->active_planes = 0;
7111
7112         if (!dm_new_crtc_state->stream)
7113                 return;
7114
7115         dm_new_crtc_state->active_planes =
7116                 count_crtc_active_planes(new_crtc_state);
7117 }
7118
7119 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7120                                        struct drm_atomic_state *state)
7121 {
7122         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7123                                                                           crtc);
7124         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7125         struct dc *dc = adev->dm.dc;
7126         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7127         int ret = -EINVAL;
7128
7129         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7130
7131         dm_update_crtc_active_planes(crtc, crtc_state);
7132
7133         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7134                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7135                 return ret;
7136         }
7137
7138         /*
7139          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7140          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7141          * planes are disabled, which is not supported by the hardware. And there is legacy
7142          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7143          */
7144         if (crtc_state->enable &&
7145             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7146                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7147                 return -EINVAL;
7148         }
7149
7150         /* In some use cases, like reset, no stream is attached */
7151         if (!dm_crtc_state->stream)
7152                 return 0;
7153
7154         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7155                 return 0;
7156
7157         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7158         return ret;
7159 }
7160
7161 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7162                                       const struct drm_display_mode *mode,
7163                                       struct drm_display_mode *adjusted_mode)
7164 {
7165         return true;
7166 }
7167
7168 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7169         .disable = dm_crtc_helper_disable,
7170         .atomic_check = dm_crtc_helper_atomic_check,
7171         .mode_fixup = dm_crtc_helper_mode_fixup,
7172         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7173 };
7174
7175 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7176 {
7177
7178 }
7179
7180 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7181 {
7182         switch (display_color_depth) {
7183         case COLOR_DEPTH_666:
7184                 return 6;
7185         case COLOR_DEPTH_888:
7186                 return 8;
7187         case COLOR_DEPTH_101010:
7188                 return 10;
7189         case COLOR_DEPTH_121212:
7190                 return 12;
7191         case COLOR_DEPTH_141414:
7192                 return 14;
7193         case COLOR_DEPTH_161616:
7194                 return 16;
7195         default:
7196                 break;
7197         }
7198         return 0;
7199 }
7200
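/*
 * For MST connectors, derive the payload bandwidth number (PBN) for the
 * adjusted mode and atomically reserve the matching VCPI time slots on
 * the topology manager, so link over-subscription is rejected already
 * at atomic_check time instead of failing at commit.
 */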
7201 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7202                                           struct drm_crtc_state *crtc_state,
7203                                           struct drm_connector_state *conn_state)
7204 {
7205         struct drm_atomic_state *state = crtc_state->state;
7206         struct drm_connector *connector = conn_state->connector;
7207         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7208         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7209         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7210         struct drm_dp_mst_topology_mgr *mst_mgr;
7211         struct drm_dp_mst_port *mst_port;
7212         enum dc_color_depth color_depth;
7213         int clock, bpp = 0;
7214         bool is_y420 = false;
7215
7216         if (!aconnector->port || !aconnector->dc_sink)
7217                 return 0;
7218
7219         mst_port = aconnector->port;
7220         mst_mgr = &aconnector->mst_port->mst_mgr;
7221
7222         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7223                 return 0;
7224
7225         if (!state->duplicated) {
7226                 int max_bpc = conn_state->max_requested_bpc;
7227                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7228                                 aconnector->force_yuv420_output;
7229                 color_depth = convert_color_depth_from_display_info(connector,
7230                                                                     is_y420,
7231                                                                     max_bpc);
7232                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7233                 clock = adjusted_mode->clock;
7234                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7235         }
7236         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7237                                                                            mst_mgr,
7238                                                                            mst_port,
7239                                                                            dm_new_connector_state->pbn,
7240                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7241         if (dm_new_connector_state->vcpi_slots < 0) {
7242                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7243                 return dm_new_connector_state->vcpi_slots;
7244         }
7245         return 0;
7246 }
7247
7248 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7249         .disable = dm_encoder_helper_disable,
7250         .atomic_check = dm_encoder_helper_atomic_check
7251 };
7252
7253 #if defined(CONFIG_DRM_AMD_DC_DCN)
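/*
 * After the DSC fairness computation has assigned a PBN to each MST
 * stream, walk the connectors once more: enable or disable DSC on every
 * port accordingly and record the final PBN and VCPI slot count in the
 * connector state consumed at commit time.
 */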
7254 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7255                                             struct dc_state *dc_state,
7256                                             struct dsc_mst_fairness_vars *vars)
7257 {
7258         struct dc_stream_state *stream = NULL;
7259         struct drm_connector *connector;
7260         struct drm_connector_state *new_con_state;
7261         struct amdgpu_dm_connector *aconnector;
7262         struct dm_connector_state *dm_conn_state;
7263         int i, j;
7264         int vcpi, pbn_div, pbn, slot_num = 0;
7265
7266         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7267
7268                 aconnector = to_amdgpu_dm_connector(connector);
7269
7270                 if (!aconnector->port)
7271                         continue;
7272
7273                 if (!new_con_state || !new_con_state->crtc)
7274                         continue;
7275
7276                 dm_conn_state = to_dm_connector_state(new_con_state);
7277
7278                 for (j = 0; j < dc_state->stream_count; j++) {
7279                         stream = dc_state->streams[j];
7280                         if (!stream)
7281                                 continue;
7282
7283                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7284                                 break;
7285
7286                         stream = NULL;
7287                 }
7288
7289                 if (!stream)
7290                         continue;
7291
7292                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7293                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7294                 for (j = 0; j < dc_state->stream_count; j++) {
7295                         if (vars[j].aconnector == aconnector) {
7296                                 pbn = vars[j].pbn;
7297                                 break;
7298                         }
7299                 }
7300
7301                 if (j == dc_state->stream_count)
7302                         continue;
7303
7304                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7305
7306                 if (stream->timing.flags.DSC != 1) {
7307                         dm_conn_state->pbn = pbn;
7308                         dm_conn_state->vcpi_slots = slot_num;
7309
7310                         drm_dp_mst_atomic_enable_dsc(state,
7311                                                      aconnector->port,
7312                                                      dm_conn_state->pbn,
7313                                                      0,
7314                                                      false);
7315                         continue;
7316                 }
7317
7318                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7319                                                     aconnector->port,
7320                                                     pbn, pbn_div,
7321                                                     true);
7322                 if (vcpi < 0)
7323                         return vcpi;
7324
7325                 dm_conn_state->pbn = pbn;
7326                 dm_conn_state->vcpi_slots = vcpi;
7327         }
7328         return 0;
7329 }
7330 #endif
7331
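/*
 * DM plane states wrap the base DRM plane state together with a
 * refcounted DC plane state: duplicating a state retains the dc_state
 * reference and destroying one releases it.
 */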
7332 static void dm_drm_plane_reset(struct drm_plane *plane)
7333 {
7334         struct dm_plane_state *amdgpu_state = NULL;
7335
7336         if (plane->state)
7337                 plane->funcs->atomic_destroy_state(plane, plane->state);
7338
7339         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7340         WARN_ON(amdgpu_state == NULL);
7341
7342         if (amdgpu_state)
7343                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7344 }
7345
7346 static struct drm_plane_state *
7347 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7348 {
7349         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7350
7351         old_dm_plane_state = to_dm_plane_state(plane->state);
7352         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7353         if (!dm_plane_state)
7354                 return NULL;
7355
7356         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7357
7358         if (old_dm_plane_state->dc_state) {
7359                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7360                 dc_plane_state_retain(dm_plane_state->dc_state);
7361         }
7362
7363         return &dm_plane_state->base;
7364 }
7365
7366 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7367                                 struct drm_plane_state *state)
7368 {
7369         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7370
7371         if (dm_plane_state->dc_state)
7372                 dc_plane_state_release(dm_plane_state->dc_state);
7373
7374         drm_atomic_helper_plane_destroy_state(plane, state);
7375 }
7376
7377 static const struct drm_plane_funcs dm_plane_funcs = {
7378         .update_plane   = drm_atomic_helper_update_plane,
7379         .disable_plane  = drm_atomic_helper_disable_plane,
7380         .destroy        = drm_primary_helper_destroy,
7381         .reset = dm_drm_plane_reset,
7382         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7383         .atomic_destroy_state = dm_drm_plane_destroy_state,
7384         .format_mod_supported = dm_plane_format_mod_supported,
7385 };
7386
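/*
 * Pin the framebuffer BO into a scanout-capable domain (VRAM, or GTT
 * where the ASIC supports displaying from system memory) and bind it
 * into GART, so the buffer has a stable GPU address for the lifetime
 * of the scanout.
 */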
7387 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7388                                       struct drm_plane_state *new_state)
7389 {
7390         struct amdgpu_framebuffer *afb;
7391         struct drm_gem_object *obj;
7392         struct amdgpu_device *adev;
7393         struct amdgpu_bo *rbo;
7394         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7395         struct list_head list;
7396         struct ttm_validate_buffer tv;
7397         struct ww_acquire_ctx ticket;
7398         uint32_t domain;
7399         int r;
7400
7401         if (!new_state->fb) {
7402                 DRM_DEBUG_KMS("No FB bound\n");
7403                 return 0;
7404         }
7405
7406         afb = to_amdgpu_framebuffer(new_state->fb);
7407         obj = new_state->fb->obj[0];
7408         rbo = gem_to_amdgpu_bo(obj);
7409         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7410         INIT_LIST_HEAD(&list);
7411
7412         tv.bo = &rbo->tbo;
7413         tv.num_shared = 1;
7414         list_add(&tv.head, &list);
7415
7416         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7417         if (r) {
7418                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7419                 return r;
7420         }
7421
7422         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7423                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7424         else
7425                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7426
7427         r = amdgpu_bo_pin(rbo, domain);
7428         if (unlikely(r != 0)) {
7429                 if (r != -ERESTARTSYS)
7430                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7431                 ttm_eu_backoff_reservation(&ticket, &list);
7432                 return r;
7433         }
7434
7435         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7436         if (unlikely(r != 0)) {
7437                 amdgpu_bo_unpin(rbo);
7438                 ttm_eu_backoff_reservation(&ticket, &list);
7439                 DRM_ERROR("%p bind failed\n", rbo);
7440                 return r;
7441         }
7442
7443         ttm_eu_backoff_reservation(&ticket, &list);
7444
7445         afb->address = amdgpu_bo_gpu_offset(rbo);
7446
7447         amdgpu_bo_ref(rbo);
7448
7449         /*
7450          * We don't do surface updates on planes that have been newly created,
7451          * but we also don't have the afb->address during atomic check.
7452          *
7453          * Fill in buffer attributes depending on the address here, but only on
7454          * newly created planes since they're not being used by DC yet and this
7455          * won't modify global state.
7456          */
7457         dm_plane_state_old = to_dm_plane_state(plane->state);
7458         dm_plane_state_new = to_dm_plane_state(new_state);
7459
7460         if (dm_plane_state_new->dc_state &&
7461             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7462                 struct dc_plane_state *plane_state =
7463                         dm_plane_state_new->dc_state;
7464                 bool force_disable_dcc = !plane_state->dcc.enable;
7465
7466                 fill_plane_buffer_attributes(
7467                         adev, afb, plane_state->format, plane_state->rotation,
7468                         afb->tiling_flags,
7469                         &plane_state->tiling_info, &plane_state->plane_size,
7470                         &plane_state->dcc, &plane_state->address,
7471                         afb->tmz_surface, force_disable_dcc);
7472         }
7473
7474         return 0;
7475 }
7476
7477 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7478                                        struct drm_plane_state *old_state)
7479 {
7480         struct amdgpu_bo *rbo;
7481         int r;
7482
7483         if (!old_state->fb)
7484                 return;
7485
7486         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7487         r = amdgpu_bo_reserve(rbo, false);
7488         if (unlikely(r)) {
7489                 DRM_ERROR("failed to reserve rbo before unpin\n");
7490                 return;
7491         }
7492
7493         amdgpu_bo_unpin(rbo);
7494         amdgpu_bo_unreserve(rbo);
7495         amdgpu_bo_unref(&rbo);
7496 }
7497
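/*
 * Clamp the plane's viewport against the CRTC and convert DC's scaling
 * limits into DRM's convention before delegating the actual check to
 * drm_atomic_helper_check_plane_state().
 */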
7498 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7499                                        struct drm_crtc_state *new_crtc_state)
7500 {
7501         struct drm_framebuffer *fb = state->fb;
7502         int min_downscale, max_upscale;
7503         int min_scale = 0;
7504         int max_scale = INT_MAX;
7505
7506         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7507         if (fb && state->crtc) {
7508                 /* Validate viewport to cover the case when only the position changes */
7509                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7510                         int viewport_width = state->crtc_w;
7511                         int viewport_height = state->crtc_h;
7512
7513                         if (state->crtc_x < 0)
7514                                 viewport_width += state->crtc_x;
7515                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7516                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7517
7518                         if (state->crtc_y < 0)
7519                                 viewport_height += state->crtc_y;
7520                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7521                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7522
7523                         if (viewport_width < 0 || viewport_height < 0) {
7524                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7525                                 return -EINVAL;
7526                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7527                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7528                                 return -EINVAL;
7529                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7530                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7531                                 return -EINVAL;
7532                         }
7533
7534                 }
7535
7536                 /* Get min/max allowed scaling factors from plane caps. */
7537                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7538                                              &min_downscale, &max_upscale);
7539                 /*
7540                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7541                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7542                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7543                  */
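                /*
                 * Worked example with assumed caps: max_upscale = 16000
                 * (16x) and min_downscale = 250 (0.25x) would yield
                 * min_scale = 65536000 / 16000 = 4096   (1/16 in 16.16)
                 * max_scale = 65536000 / 250 = 262144   (4x in 16.16)
                 */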
7544                 min_scale = (1000 << 16) / max_upscale;
7545                 max_scale = (1000 << 16) / min_downscale;
7546         }
7547
7548         return drm_atomic_helper_check_plane_state(
7549                 state, new_crtc_state, min_scale, max_scale, true, true);
7550 }
7551
7552 static int dm_plane_atomic_check(struct drm_plane *plane,
7553                                  struct drm_atomic_state *state)
7554 {
7555         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7556                                                                                  plane);
7557         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7558         struct dc *dc = adev->dm.dc;
7559         struct dm_plane_state *dm_plane_state;
7560         struct dc_scaling_info scaling_info;
7561         struct drm_crtc_state *new_crtc_state;
7562         int ret;
7563
7564         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7565
7566         dm_plane_state = to_dm_plane_state(new_plane_state);
7567
7568         if (!dm_plane_state->dc_state)
7569                 return 0;
7570
7571         new_crtc_state =
7572                 drm_atomic_get_new_crtc_state(state,
7573                                               new_plane_state->crtc);
7574         if (!new_crtc_state)
7575                 return -EINVAL;
7576
7577         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7578         if (ret)
7579                 return ret;
7580
7581         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7582         if (ret)
7583                 return ret;
7584
7585         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7586                 return 0;
7587
7588         return -EINVAL;
7589 }
7590
7591 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7592                                        struct drm_atomic_state *state)
7593 {
7594         /* Only support async updates on cursor planes. */
7595         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7596                 return -EINVAL;
7597
7598         return 0;
7599 }
7600
7601 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7602                                          struct drm_atomic_state *state)
7603 {
7604         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7605                                                                            plane);
7606         struct drm_plane_state *old_state =
7607                 drm_atomic_get_old_plane_state(state, plane);
7608
7609         trace_amdgpu_dm_atomic_update_cursor(new_state);
7610
7611         swap(plane->state->fb, new_state->fb);
7612
7613         plane->state->src_x = new_state->src_x;
7614         plane->state->src_y = new_state->src_y;
7615         plane->state->src_w = new_state->src_w;
7616         plane->state->src_h = new_state->src_h;
7617         plane->state->crtc_x = new_state->crtc_x;
7618         plane->state->crtc_y = new_state->crtc_y;
7619         plane->state->crtc_w = new_state->crtc_w;
7620         plane->state->crtc_h = new_state->crtc_h;
7621
7622         handle_cursor_update(plane, old_state);
7623 }
7624
7625 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7626         .prepare_fb = dm_plane_helper_prepare_fb,
7627         .cleanup_fb = dm_plane_helper_cleanup_fb,
7628         .atomic_check = dm_plane_atomic_check,
7629         .atomic_async_check = dm_plane_atomic_async_check,
7630         .atomic_async_update = dm_plane_atomic_async_update
7631 };
7632
7633 /*
7634  * TODO: these are currently initialized to rgb formats only.
7635  * For future use cases we should either initialize them dynamically based on
7636  * plane capabilities, or initialize this array to all formats, so internal drm
7637  * check will succeed, and let DC implement proper check
7638  */
7639 static const uint32_t rgb_formats[] = {
7640         DRM_FORMAT_XRGB8888,
7641         DRM_FORMAT_ARGB8888,
7642         DRM_FORMAT_RGBA8888,
7643         DRM_FORMAT_XRGB2101010,
7644         DRM_FORMAT_XBGR2101010,
7645         DRM_FORMAT_ARGB2101010,
7646         DRM_FORMAT_ABGR2101010,
7647         DRM_FORMAT_XRGB16161616,
7648         DRM_FORMAT_XBGR16161616,
7649         DRM_FORMAT_ARGB16161616,
7650         DRM_FORMAT_ABGR16161616,
7651         DRM_FORMAT_XBGR8888,
7652         DRM_FORMAT_ABGR8888,
7653         DRM_FORMAT_RGB565,
7654 };
7655
7656 static const uint32_t overlay_formats[] = {
7657         DRM_FORMAT_XRGB8888,
7658         DRM_FORMAT_ARGB8888,
7659         DRM_FORMAT_RGBA8888,
7660         DRM_FORMAT_XBGR8888,
7661         DRM_FORMAT_ABGR8888,
7662         DRM_FORMAT_RGB565
7663 };
7664
7665 static const u32 cursor_formats[] = {
7666         DRM_FORMAT_ARGB8888
7667 };
7668
7669 static int get_plane_formats(const struct drm_plane *plane,
7670                              const struct dc_plane_cap *plane_cap,
7671                              uint32_t *formats, int max_formats)
7672 {
7673         int i, num_formats = 0;
7674
7675         /*
7676          * TODO: Query support for each group of formats directly from
7677          * DC plane caps. This will require adding more formats to the
7678          * caps list.
7679          */
7680
7681         switch (plane->type) {
7682         case DRM_PLANE_TYPE_PRIMARY:
7683                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7684                         if (num_formats >= max_formats)
7685                                 break;
7686
7687                         formats[num_formats++] = rgb_formats[i];
7688                 }
7689
7690                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7691                         formats[num_formats++] = DRM_FORMAT_NV12;
7692                 if (plane_cap && plane_cap->pixel_format_support.p010)
7693                         formats[num_formats++] = DRM_FORMAT_P010;
7694                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7695                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7696                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7697                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7698                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7699                 }
7700                 break;
7701
7702         case DRM_PLANE_TYPE_OVERLAY:
7703                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7704                         if (num_formats >= max_formats)
7705                                 break;
7706
7707                         formats[num_formats++] = overlay_formats[i];
7708                 }
7709                 break;
7710
7711         case DRM_PLANE_TYPE_CURSOR:
7712                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7713                         if (num_formats >= max_formats)
7714                                 break;
7715
7716                         formats[num_formats++] = cursor_formats[i];
7717                 }
7718                 break;
7719         }
7720
7721         return num_formats;
7722 }
7723
7724 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7725                                 struct drm_plane *plane,
7726                                 unsigned long possible_crtcs,
7727                                 const struct dc_plane_cap *plane_cap)
7728 {
7729         uint32_t formats[32];
7730         int num_formats;
7731         int res = -EPERM;
7732         unsigned int supported_rotations;
7733         uint64_t *modifiers = NULL;
7734
7735         num_formats = get_plane_formats(plane, plane_cap, formats,
7736                                         ARRAY_SIZE(formats));
7737
7738         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7739         if (res)
7740                 return res;
7741
7742         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7743                                        &dm_plane_funcs, formats, num_formats,
7744                                        modifiers, plane->type, NULL);
7745         kfree(modifiers);
7746         if (res)
7747                 return res;
7748
7749         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7750             plane_cap && plane_cap->per_pixel_alpha) {
7751                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7752                                           BIT(DRM_MODE_BLEND_PREMULTI);
7753
7754                 drm_plane_create_alpha_property(plane);
7755                 drm_plane_create_blend_mode_property(plane, blend_caps);
7756         }
7757
7758         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7759             plane_cap &&
7760             (plane_cap->pixel_format_support.nv12 ||
7761              plane_cap->pixel_format_support.p010)) {
7762                 /* This only affects YUV formats. */
7763                 drm_plane_create_color_properties(
7764                         plane,
7765                         BIT(DRM_COLOR_YCBCR_BT601) |
7766                         BIT(DRM_COLOR_YCBCR_BT709) |
7767                         BIT(DRM_COLOR_YCBCR_BT2020),
7768                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7769                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7770                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7771         }
7772
7773         supported_rotations =
7774                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7775                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7776
7777         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7778             plane->type != DRM_PLANE_TYPE_CURSOR)
7779                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7780                                                    supported_rotations);
7781
7782         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7783
7784         /* Create (reset) the plane state */
7785         if (plane->funcs->reset)
7786                 plane->funcs->reset(plane);
7787
7788         return 0;
7789 }
7790
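/*
 * Create a CRTC together with its dedicated cursor plane, hook up the
 * DM CRTC helpers, and size the cursor limits from DC's reported
 * maximum cursor size.
 */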
7791 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7792                                struct drm_plane *plane,
7793                                uint32_t crtc_index)
7794 {
7795         struct amdgpu_crtc *acrtc = NULL;
7796         struct drm_plane *cursor_plane;
7797
7798         int res = -ENOMEM;
7799
7800         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7801         if (!cursor_plane)
7802                 goto fail;
7803
7804         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7805         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)        /* bail out if the cursor plane failed to init */
                goto fail;
7806
7807         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7808         if (!acrtc)
7809                 goto fail;
7810
7811         res = drm_crtc_init_with_planes(
7812                         dm->ddev,
7813                         &acrtc->base,
7814                         plane,
7815                         cursor_plane,
7816                         &amdgpu_dm_crtc_funcs, NULL);
7817
7818         if (res)
7819                 goto fail;
7820
7821         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7822
7823         /* Create (reset) the plane state */
7824         if (acrtc->base.funcs->reset)
7825                 acrtc->base.funcs->reset(&acrtc->base);
7826
7827         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7828         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7829
7830         acrtc->crtc_id = crtc_index;
7831         acrtc->base.enabled = false;
7832         acrtc->otg_inst = -1;
7833
7834         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7835         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7836                                    true, MAX_COLOR_LUT_ENTRIES);
7837         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7838
7839         return 0;
7840
7841 fail:
7842         kfree(acrtc);
7843         kfree(cursor_plane);
7844         return res;
7845 }
7846
7847
7848 static int to_drm_connector_type(enum signal_type st)
7849 {
7850         switch (st) {
7851         case SIGNAL_TYPE_HDMI_TYPE_A:
7852                 return DRM_MODE_CONNECTOR_HDMIA;
7853         case SIGNAL_TYPE_EDP:
7854                 return DRM_MODE_CONNECTOR_eDP;
7855         case SIGNAL_TYPE_LVDS:
7856                 return DRM_MODE_CONNECTOR_LVDS;
7857         case SIGNAL_TYPE_RGB:
7858                 return DRM_MODE_CONNECTOR_VGA;
7859         case SIGNAL_TYPE_DISPLAY_PORT:
7860         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7861                 return DRM_MODE_CONNECTOR_DisplayPort;
7862         case SIGNAL_TYPE_DVI_DUAL_LINK:
7863         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7864                 return DRM_MODE_CONNECTOR_DVID;
7865         case SIGNAL_TYPE_VIRTUAL:
7866                 return DRM_MODE_CONNECTOR_VIRTUAL;
7867
7868         default:
7869                 return DRM_MODE_CONNECTOR_Unknown;
7870         }
7871 }
7872
7873 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7874 {
7875         struct drm_encoder *encoder;
7876
7877         /* There is only one encoder per connector */
7878         drm_connector_for_each_possible_encoder(connector, encoder)
7879                 return encoder;
7880
7881         return NULL;
7882 }
7883
7884 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7885 {
7886         struct drm_encoder *encoder;
7887         struct amdgpu_encoder *amdgpu_encoder;
7888
7889         encoder = amdgpu_dm_connector_to_encoder(connector);
7890
7891         if (encoder == NULL)
7892                 return;
7893
7894         amdgpu_encoder = to_amdgpu_encoder(encoder);
7895
7896         amdgpu_encoder->native_mode.clock = 0;
7897
7898         if (!list_empty(&connector->probed_modes)) {
7899                 struct drm_display_mode *preferred_mode = NULL;
7900
7901                 list_for_each_entry(preferred_mode,
7902                                     &connector->probed_modes,
7903                                     head) {
7904                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7905                                 amdgpu_encoder->native_mode = *preferred_mode;
7906
7907                         break;
7908                 }
7909
7910         }
7911 }
7912
7913 static struct drm_display_mode *
7914 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7915                              char *name,
7916                              int hdisplay, int vdisplay)
7917 {
7918         struct drm_device *dev = encoder->dev;
7919         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7920         struct drm_display_mode *mode = NULL;
7921         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7922
7923         mode = drm_mode_duplicate(dev, native_mode);
7924
7925         if (mode == NULL)
7926                 return NULL;
7927
7928         mode->hdisplay = hdisplay;
7929         mode->vdisplay = vdisplay;
7930         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7931         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7932
7933         return mode;
7934
7935 }
7936
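/*
 * On top of the EDID modes, advertise a set of common lower resolutions
 * so scaled modes are available: only sizes no larger than the native
 * mode (and not the native size itself) that are not already in the
 * probed list are added.
 */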
7937 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7938                                                  struct drm_connector *connector)
7939 {
7940         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7941         struct drm_display_mode *mode = NULL;
7942         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7943         struct amdgpu_dm_connector *amdgpu_dm_connector =
7944                                 to_amdgpu_dm_connector(connector);
7945         int i;
7946         int n;
7947         struct mode_size {
7948                 char name[DRM_DISPLAY_MODE_LEN];
7949                 int w;
7950                 int h;
7951         } common_modes[] = {
7952                 {  "640x480",  640,  480},
7953                 {  "800x600",  800,  600},
7954                 { "1024x768", 1024,  768},
7955                 { "1280x720", 1280,  720},
7956                 { "1280x800", 1280,  800},
7957                 {"1280x1024", 1280, 1024},
7958                 { "1440x900", 1440,  900},
7959                 {"1680x1050", 1680, 1050},
7960                 {"1600x1200", 1600, 1200},
7961                 {"1920x1080", 1920, 1080},
7962                 {"1920x1200", 1920, 1200}
7963         };
7964
7965         n = ARRAY_SIZE(common_modes);
7966
7967         for (i = 0; i < n; i++) {
7968                 struct drm_display_mode *curmode = NULL;
7969                 bool mode_existed = false;
7970
7971                 if (common_modes[i].w > native_mode->hdisplay ||
7972                     common_modes[i].h > native_mode->vdisplay ||
7973                    (common_modes[i].w == native_mode->hdisplay &&
7974                     common_modes[i].h == native_mode->vdisplay))
7975                         continue;
7976
7977                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7978                         if (common_modes[i].w == curmode->hdisplay &&
7979                             common_modes[i].h == curmode->vdisplay) {
7980                                 mode_existed = true;
7981                                 break;
7982                         }
7983                 }
7984
7985                 if (mode_existed)
7986                         continue;
7987
7988                 mode = amdgpu_dm_create_common_mode(encoder,
7989                                 common_modes[i].name, common_modes[i].w,
7990                                 common_modes[i].h);
                if (!mode)      /* skip on allocation failure */
                        continue;
7991                 drm_mode_probed_add(connector, mode);
7992                 amdgpu_dm_connector->num_modes++;
7993         }
7994 }
7995
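/*
 * Internal panels (eDP/LVDS) may be mounted rotated; match the native
 * mode dimensions against the DMI-based quirk table and expose the
 * result through the connector's panel-orientation property.
 */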
7996 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7997 {
7998         struct drm_encoder *encoder;
7999         struct amdgpu_encoder *amdgpu_encoder;
8000         const struct drm_display_mode *native_mode;
8001
8002         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8003             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8004                 return;
8005
8006         encoder = amdgpu_dm_connector_to_encoder(connector);
8007         if (!encoder)
8008                 return;
8009
8010         amdgpu_encoder = to_amdgpu_encoder(encoder);
8011
8012         native_mode = &amdgpu_encoder->native_mode;
8013         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8014                 return;
8015
8016         drm_connector_set_panel_orientation_with_quirk(connector,
8017                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8018                                                        native_mode->hdisplay,
8019                                                        native_mode->vdisplay);
8020 }
8021
8022 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8023                                               struct edid *edid)
8024 {
8025         struct amdgpu_dm_connector *amdgpu_dm_connector =
8026                         to_amdgpu_dm_connector(connector);
8027
8028         if (edid) {
8029                 /* empty probed_modes */
8030                 INIT_LIST_HEAD(&connector->probed_modes);
8031                 amdgpu_dm_connector->num_modes =
8032                                 drm_add_edid_modes(connector, edid);
8033
8034                 /* Sort the probed modes before calling
8035                  * amdgpu_dm_get_native_mode(), since an EDID can
8036                  * report more than one preferred mode. Modes later
8037                  * in the probed mode list may have a higher
8038                  * preferred resolution: for example, 3840x2160 in
8039                  * the base EDID preferred timing and 4096x2160 in
8040                  * a later DID extension block.
8041                  */
8042                 drm_mode_sort(&connector->probed_modes);
8043                 amdgpu_dm_get_native_mode(connector);
8044
8045                 /* Freesync capabilities are reset by calling
8046                  * drm_add_edid_modes() and need to be
8047                  * restored here.
8048                  */
8049                 amdgpu_dm_update_freesync_caps(connector, edid);
8050
8051                 amdgpu_set_panel_orientation(connector);
8052         } else {
8053                 amdgpu_dm_connector->num_modes = 0;
8054         }
8055 }
8056
8057 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8058                               struct drm_display_mode *mode)
8059 {
8060         struct drm_display_mode *m;
8061
8062         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8063                 if (drm_mode_equal(m, mode))
8064                         return true;
8065         }
8066
8067         return false;
8068 }
8069
8070 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8071 {
8072         const struct drm_display_mode *m;
8073         struct drm_display_mode *new_mode;
8074         uint i;
8075         uint32_t new_modes_count = 0;
8076
8077         /* Standard FPS values
8078          *
8079          * 23.976       - TV/NTSC
8080          * 24           - Cinema
8081          * 25           - TV/PAL
8082          * 29.97        - TV/NTSC
8083          * 30           - TV/NTSC
8084          * 48           - Cinema HFR
8085          * 50           - TV/PAL
8086          * 60           - Commonly used
8087          * 48,72,96,120 - Multiples of 24
8088          */
8089         static const uint32_t common_rates[] = {
8090                 23976, 24000, 25000, 29970, 30000,
8091                 48000, 50000, 60000, 72000, 96000, 120000
8092         };
8093
8094         /*
8095          * Find mode with highest refresh rate with the same resolution
8096          * as the preferred mode. Some monitors report a preferred mode
8097          * with lower resolution than the highest refresh rate supported.
8098          */
8099
8100         m = get_highest_refresh_rate_mode(aconnector, true);
8101         if (!m)
8102                 return 0;
8103
8104         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8105                 uint64_t target_vtotal, target_vtotal_diff;
8106                 uint64_t num, den;
8107
8108                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8109                         continue;
8110
8111                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8112                     common_rates[i] > aconnector->max_vfreq * 1000)
8113                         continue;
8114
8115                 num = (unsigned long long)m->clock * 1000 * 1000;
8116                 den = common_rates[i] * (unsigned long long)m->htotal;
8117                 target_vtotal = div_u64(num, den);
8118                 target_vtotal_diff = target_vtotal - m->vtotal;
8119
8120                 /* Check for illegal modes */
8121                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8122                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8123                     m->vtotal + target_vtotal_diff < m->vsync_end)
8124                         continue;
8125
8126                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8127                 if (!new_mode)
8128                         goto out;
8129
8130                 new_mode->vtotal += (u16)target_vtotal_diff;
8131                 new_mode->vsync_start += (u16)target_vtotal_diff;
8132                 new_mode->vsync_end += (u16)target_vtotal_diff;
8133                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8134                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8135
8136                 if (!is_duplicate_mode(aconnector, new_mode)) {
8137                         drm_mode_probed_add(&aconnector->base, new_mode);
8138                         new_modes_count += 1;
8139                 } else
8140                         drm_mode_destroy(aconnector->base.dev, new_mode);
8141         }
8142  out:
8143         return new_modes_count;
8144 }
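
/*
 * Worked example of the vtotal arithmetic above, with hypothetical values:
 * take a 1920x1080@120 base mode with clock = 297000 kHz, htotal = 2200 and
 * vtotal = 1125. For the 60000 mHz (60 Hz) entry of common_rates[]:
 *
 *      target_vtotal      = (297000 * 1000 * 1000) / (60000 * 2200) = 2250
 *      target_vtotal_diff = 2250 - 1125 = 1125
 *
 * vsync_start, vsync_end and vtotal are each pushed down by 1125 lines,
 * halving the refresh rate while leaving the pixel clock and horizontal
 * timing untouched.
 */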
8145
8146 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8147                                                    struct edid *edid)
8148 {
8149         struct amdgpu_dm_connector *amdgpu_dm_connector =
8150                 to_amdgpu_dm_connector(connector);
8151
8152         if (!(amdgpu_freesync_vid_mode && edid))
8153                 return;
8154
8155         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8156                 amdgpu_dm_connector->num_modes +=
8157                         add_fs_modes(amdgpu_dm_connector);
8158 }
8159
8160 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8161 {
8162         struct amdgpu_dm_connector *amdgpu_dm_connector =
8163                         to_amdgpu_dm_connector(connector);
8164         struct drm_encoder *encoder;
8165         struct edid *edid = amdgpu_dm_connector->edid;
8166
8167         encoder = amdgpu_dm_connector_to_encoder(connector);
8168
8169         if (!drm_edid_is_valid(edid)) {
8170                 amdgpu_dm_connector->num_modes =
8171                                 drm_add_modes_noedid(connector, 640, 480);
8172         } else {
8173                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8174                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8175                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8176         }
8177         amdgpu_dm_fbc_init(connector);
8178
8179         return amdgpu_dm_connector->num_modes;
8180 }
8181
8182 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8183                                      struct amdgpu_dm_connector *aconnector,
8184                                      int connector_type,
8185                                      struct dc_link *link,
8186                                      int link_index)
8187 {
8188         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8189
8190         /*
8191          * Some of the properties below require access to state, like bpc.
8192          * Allocate some default initial connector state with our reset helper.
8193          */
8194         if (aconnector->base.funcs->reset)
8195                 aconnector->base.funcs->reset(&aconnector->base);
8196
8197         aconnector->connector_id = link_index;
8198         aconnector->dc_link = link;
8199         aconnector->base.interlace_allowed = false;
8200         aconnector->base.doublescan_allowed = false;
8201         aconnector->base.stereo_allowed = false;
8202         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8203         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8204         aconnector->audio_inst = -1;
8205         mutex_init(&aconnector->hpd_lock);
8206
8207         /*
8208          * Configure HPD hot-plug support: connector->polled defaults to 0,
8209          * which means HPD hot plug is not supported.
8210          */
8211         switch (connector_type) {
8212         case DRM_MODE_CONNECTOR_HDMIA:
8213                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8214                 aconnector->base.ycbcr_420_allowed =
8215                         link->link_enc->features.hdmi_ycbcr420_supported;
8216                 break;
8217         case DRM_MODE_CONNECTOR_DisplayPort:
8218                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8219                 if (link->is_dig_mapping_flexible &&
8220                     link->dc->res_pool->funcs->link_encs_assign) {
8221                         link->link_enc =
8222                                 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8223                         if (!link->link_enc)
8224                                 link->link_enc =
8225                                         link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8226                 }
8227
8228                 if (link->link_enc)
8229                         aconnector->base.ycbcr_420_allowed =
8230                                 link->link_enc->features.dp_ycbcr420_supported;
8231                 break;
8232         case DRM_MODE_CONNECTOR_DVID:
8233                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8234                 break;
8235         default:
8236                 break;
8237         }
8238
8239         drm_object_attach_property(&aconnector->base.base,
8240                                 dm->ddev->mode_config.scaling_mode_property,
8241                                 DRM_MODE_SCALE_NONE);
8242
8243         drm_object_attach_property(&aconnector->base.base,
8244                                 adev->mode_info.underscan_property,
8245                                 UNDERSCAN_OFF);
8246         drm_object_attach_property(&aconnector->base.base,
8247                                 adev->mode_info.underscan_hborder_property,
8248                                 0);
8249         drm_object_attach_property(&aconnector->base.base,
8250                                 adev->mode_info.underscan_vborder_property,
8251                                 0);
8252
8253         if (!aconnector->mst_port)
8254                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8255
8256         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8257         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8258         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8259
8260         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8261             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8262                 drm_object_attach_property(&aconnector->base.base,
8263                                 adev->mode_info.abm_level_property, 0);
8264         }
8265
8266         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8267             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8268             connector_type == DRM_MODE_CONNECTOR_eDP) {
8269                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8270
8271                 if (!aconnector->mst_port)
8272                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8273
8274 #ifdef CONFIG_DRM_AMD_DC_HDCP
8275                 if (adev->dm.hdcp_workqueue)
8276                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8277 #endif
8278         }
8279 }
8280
8281 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8282                               struct i2c_msg *msgs, int num)
8283 {
8284         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8285         struct ddc_service *ddc_service = i2c->ddc_service;
8286         struct i2c_command cmd;
8287         int i;
8288         int result = -EIO;
8289
8290         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8291
8292         if (!cmd.payloads)
8293                 return result;
8294
8295         cmd.number_of_payloads = num;
8296         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8297         cmd.speed = 100;
8298
8299         for (i = 0; i < num; i++) {
8300                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8301                 cmd.payloads[i].address = msgs[i].addr;
8302                 cmd.payloads[i].length = msgs[i].len;
8303                 cmd.payloads[i].data = msgs[i].buf;
8304         }
8305
8306         if (dc_submit_i2c(
8307                         ddc_service->ctx->dc,
8308                         ddc_service->ddc_pin->hw_info.ddc_channel,
8309                         &cmd))
8310                 result = num;
8311
8312         kfree(cmd.payloads);
8313         return result;
8314 }
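
/*
 * Illustration (hypothetical message pair, not in-tree code): a typical
 * EDID block read reaches this adapter as a one-byte offset write followed
 * by a 128-byte read, which the loop above converts 1:1 into DC payloads:
 *
 *      struct i2c_msg msgs[] = {
 *              { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &off },
 *              { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
 *      };
 *
 * payloads[0] then has .write = true and payloads[1] has .write = false,
 * both addressed to 0x50, and dc_submit_i2c() executes them as one command.
 */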
8315
8316 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8317 {
8318         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8319 }
8320
8321 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8322         .master_xfer = amdgpu_dm_i2c_xfer,
8323         .functionality = amdgpu_dm_i2c_func,
8324 };
8325
8326 static struct amdgpu_i2c_adapter *
8327 create_i2c(struct ddc_service *ddc_service,
8328            int link_index,
8329            int *res)
8330 {
8331         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8332         struct amdgpu_i2c_adapter *i2c;
8333
8334         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8335         if (!i2c)
8336                 return NULL;
8337         i2c->base.owner = THIS_MODULE;
8338         i2c->base.class = I2C_CLASS_DDC;
8339         i2c->base.dev.parent = &adev->pdev->dev;
8340         i2c->base.algo = &amdgpu_dm_i2c_algo;
8341         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8342         i2c_set_adapdata(&i2c->base, i2c);
8343         i2c->ddc_service = ddc_service;
8344         if (i2c->ddc_service->ddc_pin)
8345                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8346
8347         return i2c;
8348 }
8349
8350
8351 /*
8352  * Note: this function assumes that dc_link_detect() was called for the
8353  * dc_link which will be represented by this aconnector.
8354  */
8355 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8356                                     struct amdgpu_dm_connector *aconnector,
8357                                     uint32_t link_index,
8358                                     struct amdgpu_encoder *aencoder)
8359 {
8360         int res = 0;
8361         int connector_type;
8362         struct dc *dc = dm->dc;
8363         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8364         struct amdgpu_i2c_adapter *i2c;
8365
8366         link->priv = aconnector;
8367
8368         DRM_DEBUG_DRIVER("%s()\n", __func__);
8369
8370         i2c = create_i2c(link->ddc, link->link_index, &res);
8371         if (!i2c) {
8372                 DRM_ERROR("Failed to create i2c adapter data\n");
8373                 return -ENOMEM;
8374         }
8375
8376         aconnector->i2c = i2c;
8377         res = i2c_add_adapter(&i2c->base);
8378
8379         if (res) {
8380                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8381                 goto out_free;
8382         }
8383
8384         connector_type = to_drm_connector_type(link->connector_signal);
8385
8386         res = drm_connector_init_with_ddc(
8387                         dm->ddev,
8388                         &aconnector->base,
8389                         &amdgpu_dm_connector_funcs,
8390                         connector_type,
8391                         &i2c->base);
8392
8393         if (res) {
8394                 DRM_ERROR("connector_init failed\n");
8395                 aconnector->connector_id = -1;
8396                 goto out_free;
8397         }
8398
8399         drm_connector_helper_add(
8400                         &aconnector->base,
8401                         &amdgpu_dm_connector_helper_funcs);
8402
8403         amdgpu_dm_connector_init_helper(
8404                 dm,
8405                 aconnector,
8406                 connector_type,
8407                 link,
8408                 link_index);
8409
8410         drm_connector_attach_encoder(
8411                 &aconnector->base, &aencoder->base);
8412
8413         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8414                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8415                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8416
8417 out_free:
8418         if (res) {
8419                 kfree(i2c);
8420                 aconnector->i2c = NULL;
8421         }
8422         return res;
8423 }
8424
8425 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8426 {
8427         switch (adev->mode_info.num_crtc) {
8428         case 1:
8429                 return 0x1;
8430         case 2:
8431                 return 0x3;
8432         case 3:
8433                 return 0x7;
8434         case 4:
8435                 return 0xf;
8436         case 5:
8437                 return 0x1f;
8438         case 6:
8439         default:
8440                 return 0x3f;
8441         }
8442 }
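
/*
 * Equivalently (a sketch, not the in-tree code): for the 1-6 CRTC
 * configurations handled above, the switch computes a contiguous low
 * bitmask, one bit per CRTC, capped at the six bits of the 0x3f default:
 *
 *      return (1u << min(adev->mode_info.num_crtc, 6)) - 1;
 */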
8443
8444 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8445                                   struct amdgpu_encoder *aencoder,
8446                                   uint32_t link_index)
8447 {
8448         struct amdgpu_device *adev = drm_to_adev(dev);
8449
8450         int res = drm_encoder_init(dev,
8451                                    &aencoder->base,
8452                                    &amdgpu_dm_encoder_funcs,
8453                                    DRM_MODE_ENCODER_TMDS,
8454                                    NULL);
8455
8456         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8457
8458         if (!res)
8459                 aencoder->encoder_id = link_index;
8460         else
8461                 aencoder->encoder_id = -1;
8462
8463         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8464
8465         return res;
8466 }
8467
8468 static void manage_dm_interrupts(struct amdgpu_device *adev,
8469                                  struct amdgpu_crtc *acrtc,
8470                                  bool enable)
8471 {
8472         /*
8473          * We have no guarantee that the frontend index maps to the same
8474          * backend index - some even map to more than one.
8475          *
8476          * TODO: Use a different interrupt or check DC itself for the mapping.
8477          */
8478         int irq_type =
8479                 amdgpu_display_crtc_idx_to_irq_type(
8480                         adev,
8481                         acrtc->crtc_id);
8482
8483         if (enable) {
8484                 drm_crtc_vblank_on(&acrtc->base);
8485                 amdgpu_irq_get(
8486                         adev,
8487                         &adev->pageflip_irq,
8488                         irq_type);
8489 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8490                 amdgpu_irq_get(
8491                         adev,
8492                         &adev->vline0_irq,
8493                         irq_type);
8494 #endif
8495         } else {
8496 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8497                 amdgpu_irq_put(
8498                         adev,
8499                         &adev->vline0_irq,
8500                         irq_type);
8501 #endif
8502                 amdgpu_irq_put(
8503                         adev,
8504                         &adev->pageflip_irq,
8505                         irq_type);
8506                 drm_crtc_vblank_off(&acrtc->base);
8507         }
8508 }
8509
8510 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8511                                       struct amdgpu_crtc *acrtc)
8512 {
8513         int irq_type =
8514                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8515
8516         /**
8517          * This reads the current state for the IRQ and forcibly reapplies
8518          * the setting to hardware.
8519          */
8520         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8521 }
8522
8523 static bool
8524 is_scaling_state_different(const struct dm_connector_state *dm_state,
8525                            const struct dm_connector_state *old_dm_state)
8526 {
8527         if (dm_state->scaling != old_dm_state->scaling)
8528                 return true;
8529         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8530                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8531                         return true;
8532         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8533                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8534                         return true;
8535         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8536                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8537                 return true;
8538         return false;
8539 }
8540
8541 #ifdef CONFIG_DRM_AMD_DC_HDCP
8542 static bool is_content_protection_different(struct drm_connector_state *state,
8543                                             const struct drm_connector_state *old_state,
8544                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8545 {
8546         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8547         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8548
8549         /* Handle: Type0/1 change */
8550         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8551             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8552                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8553                 return true;
8554         }
8555
8556         /* CP is being re-enabled, ignore this
8557          *
8558          * Handles:     ENABLED -> DESIRED
8559          */
8560         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8561             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8562                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8563                 return false;
8564         }
8565
8566         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8567          *
8568          * Handles:     UNDESIRED -> ENABLED
8569          */
8570         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8571             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8572                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8573
8574         /* Stream removed and re-enabled
8575          *
8576          * Can sometimes overlap with the HPD case,
8577          * thus set update_hdcp to false to avoid
8578          * setting HDCP multiple times.
8579          *
8580          * Handles:     DESIRED -> DESIRED (Special case)
8581          */
8582         if (!(old_state->crtc && old_state->crtc->enabled) &&
8583                 state->crtc && state->crtc->enabled &&
8584                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8585                 dm_con_state->update_hdcp = false;
8586                 return true;
8587         }
8588
8589         /* Hot-plug, headless S3, DPMS
8590          *
8591          * Only start HDCP if the display is connected/enabled.
8592          * update_hdcp flag will be set to false until the next
8593          * HPD comes in.
8594          *
8595          * Handles:     DESIRED -> DESIRED (Special case)
8596          */
8597         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8598             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8599                 dm_con_state->update_hdcp = false;
8600                 return true;
8601         }
8602
8603         /*
8604          * Handles:     UNDESIRED -> UNDESIRED
8605          *              DESIRED -> DESIRED
8606          *              ENABLED -> ENABLED
8607          */
8608         if (old_state->content_protection == state->content_protection)
8609                 return false;
8610
8611         /*
8612          * Handles:     UNDESIRED -> DESIRED
8613          *              DESIRED -> UNDESIRED
8614          *              ENABLED -> UNDESIRED
8615          */
8616         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8617                 return true;
8618
8619         /*
8620          * Handles:     DESIRED -> ENABLED
8621          */
8622         return false;
8623 }
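
/*
 * Editorial summary of the transitions handled above, most specific case
 * first (derived from this function; CP = content_protection):
 *
 *      Type0/1 change, CP != UNDESIRED     -> demote to DESIRED, true
 *      ENABLED   -> DESIRED                -> restore ENABLED,   false
 *      UNDESIRED -> ENABLED (S3 resume)    -> demote to DESIRED, true
 *      DESIRED   -> DESIRED, CRTC off->on  -> clear update_hdcp, true
 *      DESIRED   -> DESIRED, pending HPD   -> clear update_hdcp, true
 *      unchanged CP                        -> false
 *      any       -> UNDESIRED or DESIRED   -> true
 *      DESIRED   -> ENABLED                -> false (the HDCP work moves
 *                                             to ENABLED once auth succeeds)
 */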
8624
8625 #endif
8626 static void remove_stream(struct amdgpu_device *adev,
8627                           struct amdgpu_crtc *acrtc,
8628                           struct dc_stream_state *stream)
8629 {
8630         /* this is the update mode case */
8631
8632         acrtc->otg_inst = -1;
8633         acrtc->enabled = false;
8634 }
8635
8636 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8637                                struct dc_cursor_position *position)
8638 {
8639         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8640         int x, y;
8641         int xorigin = 0, yorigin = 0;
8642
8643         if (!crtc || !plane->state->fb)
8644                 return 0;
8645
8646         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8647             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8648                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8649                           __func__,
8650                           plane->state->crtc_w,
8651                           plane->state->crtc_h);
8652                 return -EINVAL;
8653         }
8654
8655         x = plane->state->crtc_x;
8656         y = plane->state->crtc_y;
8657
8658         if (x <= -amdgpu_crtc->max_cursor_width ||
8659             y <= -amdgpu_crtc->max_cursor_height)
8660                 return 0;
8661
8662         if (x < 0) {
8663                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8664                 x = 0;
8665         }
8666         if (y < 0) {
8667                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8668                 y = 0;
8669         }
8670         position->enable = true;
8671         position->translate_by_source = true;
8672         position->x = x;
8673         position->y = y;
8674         position->x_hotspot = xorigin;
8675         position->y_hotspot = yorigin;
8676
8677         return 0;
8678 }
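
/*
 * Example with hypothetical values: a 64x64 cursor dragged to
 * crtc_x = -10, crtc_y = 20 yields x = 0, x_hotspot = 10, y = 20,
 * y_hotspot = 0. The hardware then scans the cursor bitmap out starting
 * 10 pixels in, so the left 10 columns hang off the edge of the screen.
 */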
8679
8680 static void handle_cursor_update(struct drm_plane *plane,
8681                                  struct drm_plane_state *old_plane_state)
8682 {
8683         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8684         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8685         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8686         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8687         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8688         uint64_t address = afb ? afb->address : 0;
8689         struct dc_cursor_position position = {0};
8690         struct dc_cursor_attributes attributes;
8691         int ret;
8692
8693         if (!plane->state->fb && !old_plane_state->fb)
8694                 return;
8695
8696         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8697                       __func__,
8698                       amdgpu_crtc->crtc_id,
8699                       plane->state->crtc_w,
8700                       plane->state->crtc_h);
8701
8702         ret = get_cursor_position(plane, crtc, &position);
8703         if (ret)
8704                 return;
8705
8706         if (!position.enable) {
8707                 /* turn off cursor */
8708                 if (crtc_state && crtc_state->stream) {
8709                         mutex_lock(&adev->dm.dc_lock);
8710                         dc_stream_set_cursor_position(crtc_state->stream,
8711                                                       &position);
8712                         mutex_unlock(&adev->dm.dc_lock);
8713                 }
8714                 return;
8715         }
8716
8717         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8718         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8719
8720         memset(&attributes, 0, sizeof(attributes));
8721         attributes.address.high_part = upper_32_bits(address);
8722         attributes.address.low_part  = lower_32_bits(address);
8723         attributes.width             = plane->state->crtc_w;
8724         attributes.height            = plane->state->crtc_h;
8725         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8726         attributes.rotation_angle    = 0;
8727         attributes.attribute_flags.value = 0;
8728
8729         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8730
8731         if (crtc_state->stream) {
8732                 mutex_lock(&adev->dm.dc_lock);
8733                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8734                                                          &attributes))
8735                         DRM_ERROR("DC failed to set cursor attributes\n");
8736
8737                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8738                                                    &position))
8739                         DRM_ERROR("DC failed to set cursor position\n");
8740                 mutex_unlock(&adev->dm.dc_lock);
8741         }
8742 }
8743
8744 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8745 {
8746
8747         assert_spin_locked(&acrtc->base.dev->event_lock);
8748         WARN_ON(acrtc->event);
8749
8750         acrtc->event = acrtc->base.state->event;
8751
8752         /* Set the flip status */
8753         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8754
8755         /* Mark this event as consumed */
8756         acrtc->base.state->event = NULL;
8757
8758         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8759                      acrtc->crtc_id);
8760 }
8761
8762 static void update_freesync_state_on_stream(
8763         struct amdgpu_display_manager *dm,
8764         struct dm_crtc_state *new_crtc_state,
8765         struct dc_stream_state *new_stream,
8766         struct dc_plane_state *surface,
8767         u32 flip_timestamp_in_us)
8768 {
8769         struct mod_vrr_params vrr_params;
8770         struct dc_info_packet vrr_infopacket = {0};
8771         struct amdgpu_device *adev = dm->adev;
8772         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8773         unsigned long flags;
8774         bool pack_sdp_v1_3 = false;
8775
8776         if (!new_stream)
8777                 return;
8778
8779         /*
8780          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8781          * For now it's sufficient to just guard against these conditions.
8782          */
8783
8784         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8785                 return;
8786
8787         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8788         vrr_params = acrtc->dm_irq_params.vrr_params;
8789
8790         if (surface) {
8791                 mod_freesync_handle_preflip(
8792                         dm->freesync_module,
8793                         surface,
8794                         new_stream,
8795                         flip_timestamp_in_us,
8796                         &vrr_params);
8797
8798                 if (adev->family < AMDGPU_FAMILY_AI &&
8799                     amdgpu_dm_vrr_active(new_crtc_state)) {
8800                         mod_freesync_handle_v_update(dm->freesync_module,
8801                                                      new_stream, &vrr_params);
8802
8803                         /* Need to call this before the frame ends. */
8804                         dc_stream_adjust_vmin_vmax(dm->dc,
8805                                                    new_crtc_state->stream,
8806                                                    &vrr_params.adjust);
8807                 }
8808         }
8809
8810         mod_freesync_build_vrr_infopacket(
8811                 dm->freesync_module,
8812                 new_stream,
8813                 &vrr_params,
8814                 PACKET_TYPE_VRR,
8815                 TRANSFER_FUNC_UNKNOWN,
8816                 &vrr_infopacket,
8817                 pack_sdp_v1_3);
8818
8819         new_crtc_state->freesync_timing_changed |=
8820                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8821                         &vrr_params.adjust,
8822                         sizeof(vrr_params.adjust)) != 0);
8823
8824         new_crtc_state->freesync_vrr_info_changed |=
8825                 (memcmp(&new_crtc_state->vrr_infopacket,
8826                         &vrr_infopacket,
8827                         sizeof(vrr_infopacket)) != 0);
8828
8829         acrtc->dm_irq_params.vrr_params = vrr_params;
8830         new_crtc_state->vrr_infopacket = vrr_infopacket;
8831
8832         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8833         new_stream->vrr_infopacket = vrr_infopacket;
8834
8835         if (new_crtc_state->freesync_vrr_info_changed)
8836                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8837                               new_crtc_state->base.crtc->base.id,
8838                               (int)new_crtc_state->base.vrr_enabled,
8839                               (int)vrr_params.state);
8840
8841         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8842 }
8843
8844 static void update_stream_irq_parameters(
8845         struct amdgpu_display_manager *dm,
8846         struct dm_crtc_state *new_crtc_state)
8847 {
8848         struct dc_stream_state *new_stream = new_crtc_state->stream;
8849         struct mod_vrr_params vrr_params;
8850         struct mod_freesync_config config = new_crtc_state->freesync_config;
8851         struct amdgpu_device *adev = dm->adev;
8852         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8853         unsigned long flags;
8854
8855         if (!new_stream)
8856                 return;
8857
8858         /*
8859          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8860          * For now it's sufficient to just guard against these conditions.
8861          */
8862         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8863                 return;
8864
8865         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8866         vrr_params = acrtc->dm_irq_params.vrr_params;
8867
8868         if (new_crtc_state->vrr_supported &&
8869             config.min_refresh_in_uhz &&
8870             config.max_refresh_in_uhz) {
8871                 /*
8872                  * If a freesync-compatible mode was set, config.state will
8873                  * have been set in atomic check.
8874                  */
8875                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8876                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8877                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8878                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8879                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8880                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8881                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8882                 } else {
8883                         config.state = new_crtc_state->base.vrr_enabled ?
8884                                                      VRR_STATE_ACTIVE_VARIABLE :
8885                                                      VRR_STATE_INACTIVE;
8886                 }
8887         } else {
8888                 config.state = VRR_STATE_UNSUPPORTED;
8889         }
8890
8891         mod_freesync_build_vrr_params(dm->freesync_module,
8892                                       new_stream,
8893                                       &config, &vrr_params);
8894
8895         new_crtc_state->freesync_timing_changed |=
8896                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8897                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8898
8899         new_crtc_state->freesync_config = config;
8900         /* Copy state for access from DM IRQ handler */
8901         acrtc->dm_irq_params.freesync_config = config;
8902         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8903         acrtc->dm_irq_params.vrr_params = vrr_params;
8904         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8905 }
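
/*
 * Editorial summary of the state selection above:
 *
 *      vrr_supported, fixed refresh chosen in atomic check
 *                                          -> VRR_STATE_ACTIVE_FIXED
 *      vrr_supported, base.vrr_enabled     -> VRR_STATE_ACTIVE_VARIABLE
 *      vrr_supported, !base.vrr_enabled    -> VRR_STATE_INACTIVE
 *      !vrr_supported (or zero refresh
 *       bounds in the config)              -> VRR_STATE_UNSUPPORTED
 */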
8906
8907 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8908                                             struct dm_crtc_state *new_state)
8909 {
8910         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8911         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8912
8913         if (!old_vrr_active && new_vrr_active) {
8914                 /* Transition VRR inactive -> active:
8915                  * While VRR is active, we must not disable the vblank irq, as a
8916                  * re-enable after a disable would compute bogus vblank/pflip
8917                  * timestamps if the disable happened inside the display front-porch.
8918                  *
8919                  * We also need vupdate irq for the actual core vblank handling
8920                  * at end of vblank.
8921                  */
8922                 dm_set_vupdate_irq(new_state->base.crtc, true);
8923                 drm_crtc_vblank_get(new_state->base.crtc);
8924                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8925                                  __func__, new_state->base.crtc->base.id);
8926         } else if (old_vrr_active && !new_vrr_active) {
8927                 /* Transition VRR active -> inactive:
8928                  * Allow vblank irq disable again for fixed refresh rate.
8929                  */
8930                 dm_set_vupdate_irq(new_state->base.crtc, false);
8931                 drm_crtc_vblank_put(new_state->base.crtc);
8932                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8933                                  __func__, new_state->base.crtc->base.id);
8934         }
8935 }
8936
8937 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8938 {
8939         struct drm_plane *plane;
8940         struct drm_plane_state *old_plane_state;
8941         int i;
8942
8943         /*
8944          * TODO: Make this per-stream so we don't issue redundant updates for
8945          * commits with multiple streams.
8946          */
8947         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8948                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8949                         handle_cursor_update(plane, old_plane_state);
8950 }
8951
8952 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8953                                     struct dc_state *dc_state,
8954                                     struct drm_device *dev,
8955                                     struct amdgpu_display_manager *dm,
8956                                     struct drm_crtc *pcrtc,
8957                                     bool wait_for_vblank)
8958 {
8959         uint32_t i;
8960         uint64_t timestamp_ns;
8961         struct drm_plane *plane;
8962         struct drm_plane_state *old_plane_state, *new_plane_state;
8963         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8964         struct drm_crtc_state *new_pcrtc_state =
8965                         drm_atomic_get_new_crtc_state(state, pcrtc);
8966         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8967         struct dm_crtc_state *dm_old_crtc_state =
8968                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8969         int planes_count = 0, vpos, hpos;
8970         long r;
8971         unsigned long flags;
8972         struct amdgpu_bo *abo;
8973         uint32_t target_vblank, last_flip_vblank;
8974         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8975         bool pflip_present = false;
8976         struct {
8977                 struct dc_surface_update surface_updates[MAX_SURFACES];
8978                 struct dc_plane_info plane_infos[MAX_SURFACES];
8979                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8980                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8981                 struct dc_stream_update stream_update;
8982         } *bundle;
8983
8984         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8985
8986         if (!bundle) {
8987                 dm_error("Failed to allocate update bundle\n");
8988                 goto cleanup;
8989         }
8990
8991         /*
8992          * Disable the cursor first if we're disabling all the planes.
8993          * It'll remain on the screen after the planes are re-enabled
8994          * if we don't.
8995          */
8996         if (acrtc_state->active_planes == 0)
8997                 amdgpu_dm_commit_cursors(state);
8998
8999         /* update planes when needed */
9000         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9001                 struct drm_crtc *crtc = new_plane_state->crtc;
9002                 struct drm_crtc_state *new_crtc_state;
9003                 struct drm_framebuffer *fb = new_plane_state->fb;
9004                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9005                 bool plane_needs_flip;
9006                 struct dc_plane_state *dc_plane;
9007                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9008
9009                 /* Cursor plane is handled after stream updates */
9010                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9011                         continue;
9012
9013                 if (!fb || !crtc || pcrtc != crtc)
9014                         continue;
9015
9016                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9017                 if (!new_crtc_state->active)
9018                         continue;
9019
9020                 dc_plane = dm_new_plane_state->dc_state;
9021
9022                 bundle->surface_updates[planes_count].surface = dc_plane;
9023                 if (new_pcrtc_state->color_mgmt_changed) {
9024                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9025                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9026                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9027                 }
9028
9029                 fill_dc_scaling_info(dm->adev, new_plane_state,
9030                                      &bundle->scaling_infos[planes_count]);
9031
9032                 bundle->surface_updates[planes_count].scaling_info =
9033                         &bundle->scaling_infos[planes_count];
9034
9035                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9036
9037                 pflip_present = pflip_present || plane_needs_flip;
9038
9039                 if (!plane_needs_flip) {
9040                         planes_count += 1;
9041                         continue;
9042                 }
9043
9044                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9045
9046                 /*
9047                  * Wait for all fences on this FB. Do a limited wait to avoid
9048                  * deadlock during GPU reset, when this fence would not signal
9049                  * while we hold the reservation lock for the BO.
9050                  */
9051                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9052                                           msecs_to_jiffies(5000));
9053                 if (unlikely(r <= 0))
9054                         DRM_ERROR("Waiting for fences timed out!\n");
9055
9056                 fill_dc_plane_info_and_addr(
9057                         dm->adev, new_plane_state,
9058                         afb->tiling_flags,
9059                         &bundle->plane_infos[planes_count],
9060                         &bundle->flip_addrs[planes_count].address,
9061                         afb->tmz_surface, false);
9062
9063                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9064                                  new_plane_state->plane->index,
9065                                  bundle->plane_infos[planes_count].dcc.enable);
9066
9067                 bundle->surface_updates[planes_count].plane_info =
9068                         &bundle->plane_infos[planes_count];
9069
9070                 /*
9071                  * Only allow immediate flips for fast updates that don't
9072                  * change FB pitch, DCC state, rotation or mirroring.
9073                  */
9074                 bundle->flip_addrs[planes_count].flip_immediate =
9075                         crtc->state->async_flip &&
9076                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9077
9078                 timestamp_ns = ktime_get_ns();
9079                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9080                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9081                 bundle->surface_updates[planes_count].surface = dc_plane;
9082
9083                 if (!bundle->surface_updates[planes_count].surface) {
9084                         DRM_ERROR("No surface for CRTC: id=%d\n",
9085                                         acrtc_attach->crtc_id);
9086                         continue;
9087                 }
9088
9089                 if (plane == pcrtc->primary)
9090                         update_freesync_state_on_stream(
9091                                 dm,
9092                                 acrtc_state,
9093                                 acrtc_state->stream,
9094                                 dc_plane,
9095                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9096
9097                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9098                                  __func__,
9099                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9100                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9101
9102                 planes_count += 1;
9103
9104         }
9105
9106         if (pflip_present) {
9107                 if (!vrr_active) {
9108                         /* Use old throttling in non-vrr fixed refresh rate mode
9109                          * to keep flip scheduling based on target vblank counts
9110                          * working in a backwards compatible way, e.g., for
9111                          * clients using the GLX_OML_sync_control extension or
9112                          * DRI3/Present extension with defined target_msc.
9113                          */
9114                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9115                 } else {
9117                         /* For variable refresh rate mode only:
9118                          * Get vblank of last completed flip to avoid > 1 vrr
9119                          * flips per video frame by use of throttling, but allow
9120                          * flip programming anywhere in the possibly large
9121                          * variable vrr vblank interval for fine-grained flip
9122                          * timing control and more opportunity to avoid stutter
9123                          * on late submission of flips.
9124                          */
9125                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9126                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9127                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9128                 }
9129
9130                 target_vblank = last_flip_vblank + wait_for_vblank;
9131
9132                 /*
9133                  * Wait until we're out of the vertical blank period before the one
9134                  * targeted by the flip
9135                  */
9136                 while ((acrtc_attach->enabled &&
9137                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9138                                                             0, &vpos, &hpos, NULL,
9139                                                             NULL, &pcrtc->hwmode)
9140                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9141                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9142                         (int)(target_vblank -
9143                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9144                         usleep_range(1000, 1100);
9145                 }
9146
9147                 /**
9148                  * Prepare the flip event for the pageflip interrupt to handle.
9149                  *
9150                  * This only works in the case where we've already turned on the
9151          * appropriate hardware blocks (e.g. HUBP), so in the transition case
9152                  * from 0 -> n planes we have to skip a hardware generated event
9153                  * and rely on sending it from software.
9154                  */
9155                 if (acrtc_attach->base.state->event &&
9156                     acrtc_state->active_planes > 0 &&
9157                     !acrtc_state->force_dpms_off) {
9158                         drm_crtc_vblank_get(pcrtc);
9159
9160                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9161
9162                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9163                         prepare_flip_isr(acrtc_attach);
9164
9165                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9166                 }
9167
9168                 if (acrtc_state->stream) {
9169                         if (acrtc_state->freesync_vrr_info_changed)
9170                                 bundle->stream_update.vrr_infopacket =
9171                                         &acrtc_state->stream->vrr_infopacket;
9172                 }
9173         }
9174
9175         /* Update the planes if changed or disable if we don't have any. */
9176         if ((planes_count || acrtc_state->active_planes == 0) &&
9177                 acrtc_state->stream) {
9178 #if defined(CONFIG_DRM_AMD_DC_DCN)
9179                 /*
9180                  * If PSR or idle optimizations are enabled then flush out
9181                  * any pending work before hardware programming.
9182                  */
9183                 if (dm->vblank_control_workqueue)
9184                         flush_workqueue(dm->vblank_control_workqueue);
9185 #endif
9186
9187                 bundle->stream_update.stream = acrtc_state->stream;
9188                 if (new_pcrtc_state->mode_changed) {
9189                         bundle->stream_update.src = acrtc_state->stream->src;
9190                         bundle->stream_update.dst = acrtc_state->stream->dst;
9191                 }
9192
9193                 if (new_pcrtc_state->color_mgmt_changed) {
9194                         /*
9195                          * TODO: This isn't fully correct since we've actually
9196                          * already modified the stream in place.
9197                          */
9198                         bundle->stream_update.gamut_remap =
9199                                 &acrtc_state->stream->gamut_remap_matrix;
9200                         bundle->stream_update.output_csc_transform =
9201                                 &acrtc_state->stream->csc_color_matrix;
9202                         bundle->stream_update.out_transfer_func =
9203                                 acrtc_state->stream->out_transfer_func;
9204                 }
9205
9206                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9207                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9208                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9209
9210                 /*
9211                  * If FreeSync state on the stream has changed then we need to
9212                  * re-adjust the min/max bounds now that DC doesn't handle this
9213                  * as part of commit.
9214                  */
9215                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9216                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9217                         dc_stream_adjust_vmin_vmax(
9218                                 dm->dc, acrtc_state->stream,
9219                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9220                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9221                 }
9222                 mutex_lock(&dm->dc_lock);
9223                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9224                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9225                         amdgpu_dm_psr_disable(acrtc_state->stream);
9226
9227                 dc_commit_updates_for_stream(dm->dc,
9228                                                      bundle->surface_updates,
9229                                                      planes_count,
9230                                                      acrtc_state->stream,
9231                                                      &bundle->stream_update,
9232                                                      dc_state);
9233
9234                 /**
9235                  * Enable or disable the interrupts on the backend.
9236                  *
9237                  * Most pipes are put into power gating when unused.
9238                  *
9239                  * When a pipe is power gated, its interrupt enablement
9240                  * state is lost by the time power gating is disabled again.
9241                  *
9242                  * So we need to update the IRQ control state in hardware
9243                  * whenever the pipe turns on (since it could be previously
9244                  * power gated) or off (since some pipes can't be power gated
9245                  * on some ASICs).
9246                  */
9247                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9248                         dm_update_pflip_irq_state(drm_to_adev(dev),
9249                                                   acrtc_attach);
9250
9251                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9252                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9253                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9254                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9255
9256                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9257                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9258                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9259                         struct amdgpu_dm_connector *aconn =
9260                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9261
9262                         if (aconn->psr_skip_count > 0)
9263                                 aconn->psr_skip_count--;
9264
9265                         /* Allow PSR when skip count is 0. */
9266                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9267                 } else {
9268                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9269                 }
9270
9271                 mutex_unlock(&dm->dc_lock);
9272         }
9273
9274         /*
9275          * Update cursor state *after* programming all the planes.
9276          * This avoids redundant programming in the case where we're going
9277          * to be disabling a single plane - those pipes are being disabled.
9278          */
9279         if (acrtc_state->active_planes)
9280                 amdgpu_dm_commit_cursors(state);
9281
9282 cleanup:
9283         kfree(bundle);
9284 }
9285
9286 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9287                                    struct drm_atomic_state *state)
9288 {
9289         struct amdgpu_device *adev = drm_to_adev(dev);
9290         struct amdgpu_dm_connector *aconnector;
9291         struct drm_connector *connector;
9292         struct drm_connector_state *old_con_state, *new_con_state;
9293         struct drm_crtc_state *new_crtc_state;
9294         struct dm_crtc_state *new_dm_crtc_state;
9295         const struct dc_stream_status *status;
9296         int i, inst;
9297
9298         /* Notify audio device removals. */
9299         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9300                 if (old_con_state->crtc != new_con_state->crtc) {
9301                         /* CRTC changes require notification. */
9302                         goto notify;
9303                 }
9304
9305                 if (!new_con_state->crtc)
9306                         continue;
9307
9308                 new_crtc_state = drm_atomic_get_new_crtc_state(
9309                         state, new_con_state->crtc);
9310
9311                 if (!new_crtc_state)
9312                         continue;
9313
9314                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9315                         continue;
9316
9317         notify:
9318                 aconnector = to_amdgpu_dm_connector(connector);
9319
9320                 mutex_lock(&adev->dm.audio_lock);
9321                 inst = aconnector->audio_inst;
9322                 aconnector->audio_inst = -1;
9323                 mutex_unlock(&adev->dm.audio_lock);
9324
9325                 amdgpu_dm_audio_eld_notify(adev, inst);
9326         }
9327
9328         /* Notify audio device additions. */
9329         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9330                 if (!new_con_state->crtc)
9331                         continue;
9332
9333                 new_crtc_state = drm_atomic_get_new_crtc_state(
9334                         state, new_con_state->crtc);
9335
9336                 if (!new_crtc_state)
9337                         continue;
9338
9339                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9340                         continue;
9341
9342                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9343                 if (!new_dm_crtc_state->stream)
9344                         continue;
9345
9346                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9347                 if (!status)
9348                         continue;
9349
9350                 aconnector = to_amdgpu_dm_connector(connector);
9351
9352                 mutex_lock(&adev->dm.audio_lock);
9353                 inst = status->audio_inst;
9354                 aconnector->audio_inst = inst;
9355                 mutex_unlock(&adev->dm.audio_lock);
9356
9357                 amdgpu_dm_audio_eld_notify(adev, inst);
9358         }
9359 }
9360
9361 /*
9362  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9363  * @crtc_state: the DRM CRTC state
9364  * @stream_state: the DC stream state.
9365  *
9366  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9367  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9368  */
9369 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9370                                                 struct dc_stream_state *stream_state)
9371 {
9372         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9373 }
9374
9375 /**
9376  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9377  * @state: The atomic state to commit
9378  *
9379  * This will tell DC to commit the constructed DC state from atomic_check,
9380  * programming the hardware. Any failure here implies a hardware failure, since
9381  * atomic check should have filtered out anything non-kosher.
9382  */
9383 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9384 {
9385         struct drm_device *dev = state->dev;
9386         struct amdgpu_device *adev = drm_to_adev(dev);
9387         struct amdgpu_display_manager *dm = &adev->dm;
9388         struct dm_atomic_state *dm_state;
9389         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9390         uint32_t i, j;
9391         struct drm_crtc *crtc;
9392         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9393         unsigned long flags;
9394         bool wait_for_vblank = true;
9395         struct drm_connector *connector;
9396         struct drm_connector_state *old_con_state, *new_con_state;
9397         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9398         int crtc_disable_count = 0;
9399         bool mode_set_reset_required = false;
9400
9401         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9402
9403         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9404
9405         dm_state = dm_atomic_get_new_state(state);
9406         if (dm_state && dm_state->context) {
9407                 dc_state = dm_state->context;
9408         } else {
9409                 /* No state changes, retain current state. */
9410                 dc_state_temp = dc_create_state(dm->dc);
9411                 ASSERT(dc_state_temp);
9412                 dc_state = dc_state_temp;
9413                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9414         }
9415
9416         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9417                                        new_crtc_state, i) {
9418                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9419
9420                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9421
9422                 if (old_crtc_state->active &&
9423                     (!new_crtc_state->active ||
9424                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9425                         manage_dm_interrupts(adev, acrtc, false);
9426                         dc_stream_release(dm_old_crtc_state->stream);
9427                 }
9428         }
9429
9430         drm_atomic_helper_calc_timestamping_constants(state);
9431
9432         /* update changed items */
9433         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9434                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9435
9436                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9437                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9438
9439                 DRM_DEBUG_ATOMIC(
9440                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9441                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9442                         "connectors_changed:%d\n",
9443                         acrtc->crtc_id,
9444                         new_crtc_state->enable,
9445                         new_crtc_state->active,
9446                         new_crtc_state->planes_changed,
9447                         new_crtc_state->mode_changed,
9448                         new_crtc_state->active_changed,
9449                         new_crtc_state->connectors_changed);
9450
9451                 /* Disable cursor if disabling crtc */
9452                 if (old_crtc_state->active && !new_crtc_state->active) {
9453                         struct dc_cursor_position position;
9454
9455                         memset(&position, 0, sizeof(position));
9456                         mutex_lock(&dm->dc_lock);
9457                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9458                         mutex_unlock(&dm->dc_lock);
9459                 }
9460
9461                 /* Copy all transient state flags into dc state */
9462                 if (dm_new_crtc_state->stream) {
9463                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9464                                                             dm_new_crtc_state->stream);
9465                 }
9466
9467                 /* Handle the headless hotplug case, updating new_state
9468                  * and aconnector as needed.
9469                  */
9470
9471                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9472
9473                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9474
9475                         if (!dm_new_crtc_state->stream) {
9476                                 /*
9477                                  * This can happen because of issues with
9478                                  * userspace notification delivery: userspace
9479                                  * tries to set a mode on a display which is
9480                                  * in fact disconnected, so dc_sink is NULL
9481                                  * on the aconnector. We expect a mode reset
9482                                  * to come soon in that case.
9483                                  *
9484                                  * This can also happen when an unplug occurs
9485                                  * during the resume sequence.
9486                                  *
9487                                  * In this case, we want to pretend we still
9488                                  * have a sink to keep the pipe running so that
9489                                  * hw state is consistent with the sw state.
9490                                  */
9491                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9492                                                 __func__, acrtc->base.base.id);
9493                                 continue;
9494                         }
9495
9496                         if (dm_old_crtc_state->stream)
9497                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9498
9499                         pm_runtime_get_noresume(dev->dev);
9500
9501                         acrtc->enabled = true;
9502                         acrtc->hw_mode = new_crtc_state->mode;
9503                         crtc->hwmode = new_crtc_state->mode;
9504                         mode_set_reset_required = true;
9505                 } else if (modereset_required(new_crtc_state)) {
9506                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9507                         /* i.e. reset mode */
9508                         if (dm_old_crtc_state->stream)
9509                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9510
9511                         mode_set_reset_required = true;
9512                 }
9513         } /* for_each_crtc_in_state() */
9514
9515         if (dc_state) {
9516                 /* If there is a mode set or reset, disable eDP PSR. */
9517                 if (mode_set_reset_required) {
9518 #if defined(CONFIG_DRM_AMD_DC_DCN)
9519                         if (dm->vblank_control_workqueue)
9520                                 flush_workqueue(dm->vblank_control_workqueue);
9521 #endif
9522                         amdgpu_dm_psr_disable_all(dm);
9523                 }
9524
9525                 dm_enable_per_frame_crtc_master_sync(dc_state);
9526                 mutex_lock(&dm->dc_lock);
9527                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9528 #if defined(CONFIG_DRM_AMD_DC_DCN)
9529                 /* Allow idle optimization when vblank count is 0 for display off */
9530                 if (dm->active_vblank_irq_count == 0)
9531                         dc_allow_idle_optimizations(dm->dc, true);
9532 #endif
9533                 mutex_unlock(&dm->dc_lock);
9534         }
9535
9536         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9537                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9538
9539                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9540
9541                 if (dm_new_crtc_state->stream != NULL) {
9542                         const struct dc_stream_status *status =
9543                                         dc_stream_get_status(dm_new_crtc_state->stream);
9544
9545                         if (!status)
9546                                 status = dc_stream_get_status_from_state(dc_state,
9547                                                                          dm_new_crtc_state->stream);
9548                         if (!status)
9549                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
9550                         else
9551                                 acrtc->otg_inst = status->primary_otg_inst;
9552                 }
9553         }
9554 #ifdef CONFIG_DRM_AMD_DC_HDCP
9555         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9556                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9557                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9558                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9559
9560                 new_crtc_state = NULL;
9561
9562                 if (acrtc)
9563                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9564
9565                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9566
9567                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9568                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9569                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9570                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9571                         dm_new_con_state->update_hdcp = true;
9572                         continue;
9573                 }
9574
9575                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9576                         hdcp_update_display(
9577                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9578                                 new_con_state->hdcp_content_type,
9579                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9580         }
9581 #endif
9582
9583         /* Handle connector state changes */
9584         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9585                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9586                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9587                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9588                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9589                 struct dc_stream_update stream_update;
9590                 struct dc_info_packet hdr_packet;
9591                 struct dc_stream_status *status = NULL;
9592                 bool abm_changed, hdr_changed, scaling_changed;
9593
9594                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9595                 memset(&stream_update, 0, sizeof(stream_update));
9596
9597                 if (acrtc) {
9598                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9599                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9600                 }
9601
9602                 /* Skip any modesets/resets */
9603                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9604                         continue;
9605
9606                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9607                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9608
9609                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9610                                                              dm_old_con_state);
9611
9612                 abm_changed = dm_new_crtc_state->abm_level !=
9613                               dm_old_crtc_state->abm_level;
9614
9615                 hdr_changed =
9616                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9617
9618                 if (!scaling_changed && !abm_changed && !hdr_changed)
9619                         continue;
9620
9621                 stream_update.stream = dm_new_crtc_state->stream;
9622                 if (scaling_changed) {
9623                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9624                                         dm_new_con_state, dm_new_crtc_state->stream);
9625
9626                         stream_update.src = dm_new_crtc_state->stream->src;
9627                         stream_update.dst = dm_new_crtc_state->stream->dst;
9628                 }
9629
9630                 if (abm_changed) {
9631                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9632
9633                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9634                 }
9635
9636                 if (hdr_changed) {
9637                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9638                         stream_update.hdr_static_metadata = &hdr_packet;
9639                 }
9640
9641                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9642
9643                 if (WARN_ON(!status))
9644                         continue;
9645
9646                 WARN_ON(!status->plane_count);
9647
9648                 /*
9649                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9650                  * Here we create an empty update on each plane.
9651                  * To fix this, DC should permit updating only stream properties.
9652                  */
9653                 for (j = 0; j < status->plane_count; j++)
9654                         dummy_updates[j].surface = status->plane_states[0];
9655
9656
9657                 mutex_lock(&dm->dc_lock);
9658                 dc_commit_updates_for_stream(dm->dc,
9659                                                      dummy_updates,
9660                                                      status->plane_count,
9661                                                      dm_new_crtc_state->stream,
9662                                                      &stream_update,
9663                                                      dc_state);
9664                 mutex_unlock(&dm->dc_lock);
9665         }
9666
9667         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9668         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9669                                       new_crtc_state, i) {
9670                 if (old_crtc_state->active && !new_crtc_state->active)
9671                         crtc_disable_count++;
9672
9673                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9674                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9675
9676                 /* For freesync config update on crtc state and params for irq */
9677                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9678
9679                 /* Handle vrr on->off / off->on transitions */
9680                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9681                                                 dm_new_crtc_state);
9682         }
9683
9684         /**
9685          * Enable interrupts for CRTCs that are newly enabled or went through
9686          * a modeset. This is intentionally deferred until after the front end
9687          * state has been modified, so that the OTG is on and the IRQ
9688          * handlers don't access stale or invalid state.
9689          */
9690         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9691                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9692 #ifdef CONFIG_DEBUG_FS
9693                 bool configure_crc = false;
9694                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9695 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9696                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9697 #endif
9698                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9699                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9700                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9701 #endif
9702                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9703
9704                 if (new_crtc_state->active &&
9705                     (!old_crtc_state->active ||
9706                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9707                         dc_stream_retain(dm_new_crtc_state->stream);
9708                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9709                         manage_dm_interrupts(adev, acrtc, true);
9710
9711 #ifdef CONFIG_DEBUG_FS
9712                         /**
9713                          * Frontend may have changed so reapply the CRC capture
9714                          * settings for the stream.
9715                          */
9716                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9717
9718                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9719                                 configure_crc = true;
9720 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9721                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9722                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9723                                         acrtc->dm_irq_params.crc_window.update_win = true;
9724                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9725                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9726                                         crc_rd_wrk->crtc = crtc;
9727                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9728                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9729                                 }
9730 #endif
9731                         }
9732
9733                         if (configure_crc)
9734                                 if (amdgpu_dm_crtc_configure_crc_source(
9735                                         crtc, dm_new_crtc_state, cur_crc_src))
9736                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9737 #endif
9738                 }
9739         }
9740
9741         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9742                 if (new_crtc_state->async_flip)
9743                         wait_for_vblank = false;
9744
9745         /* Update planes when needed, per CRTC. */
9746         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9747                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9748
9749                 if (dm_new_crtc_state->stream)
9750                         amdgpu_dm_commit_planes(state, dc_state, dev,
9751                                                 dm, crtc, wait_for_vblank);
9752         }
9753
9754         /* Update audio instances for each connector. */
9755         amdgpu_dm_commit_audio(dev, state);
9756
9757 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9758         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9759         /* restore the backlight level */
9760         for (i = 0; i < dm->num_of_edps; i++) {
9761                 if (dm->backlight_dev[i] &&
9762                     (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9763                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9764         }
9765 #endif
9766         /*
9767          * Send a vblank event for every event not handled in the flip and
9768          * mark the event consumed for drm_atomic_helper_commit_hw_done().
9769          */
9770         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9771         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9772
9773                 if (new_crtc_state->event)
9774                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9775
9776                 new_crtc_state->event = NULL;
9777         }
9778         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9779
9780         /* Signal HW programming completion */
9781         drm_atomic_helper_commit_hw_done(state);
9782
9783         if (wait_for_vblank)
9784                 drm_atomic_helper_wait_for_flip_done(dev, state);
9785
9786         drm_atomic_helper_cleanup_planes(dev, state);
9787
9788         /* Return the stolen VGA memory back to VRAM. */
9789         if (!adev->mman.keep_stolen_vga_memory)
9790                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9791         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9792
9793         /*
9794          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9795          * so we can put the GPU into runtime suspend if we're not driving any
9796          * displays anymore
9797          */
9798         for (i = 0; i < crtc_disable_count; i++)
9799                 pm_runtime_put_autosuspend(dev->dev);
9800         pm_runtime_mark_last_busy(dev->dev);
9801
9802         if (dc_state_temp)
9803                 dc_release_state(dc_state_temp);
9804 }
9805
9806
9807 static int dm_force_atomic_commit(struct drm_connector *connector)
9808 {
9809         int ret = 0;
9810         struct drm_device *ddev = connector->dev;
9811         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9812         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9813         struct drm_plane *plane = disconnected_acrtc->base.primary;
9814         struct drm_connector_state *conn_state;
9815         struct drm_crtc_state *crtc_state;
9816         struct drm_plane_state *plane_state;
9817
9818         if (!state)
9819                 return -ENOMEM;
9820
9821         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9822
9823         /* Construct an atomic state to restore the previous display settings */
9824
9825         /*
9826          * Attach connectors to drm_atomic_state
9827          */
9828         conn_state = drm_atomic_get_connector_state(state, connector);
9829
9830         ret = PTR_ERR_OR_ZERO(conn_state);
9831         if (ret)
9832                 goto out;
9833
9834         /* Attach crtc to drm_atomic_state */
9835         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9836
9837         ret = PTR_ERR_OR_ZERO(crtc_state);
9838         if (ret)
9839                 goto out;
9840
9841         /* force a restore */
9842         crtc_state->mode_changed = true;
9843
9844         /* Attach plane to drm_atomic_state */
9845         plane_state = drm_atomic_get_plane_state(state, plane);
9846
9847         ret = PTR_ERR_OR_ZERO(plane_state);
9848         if (ret)
9849                 goto out;
9850
9851         /* Call commit internally with the state we just constructed */
9852         ret = drm_atomic_commit(state);
9853
9854 out:
9855         drm_atomic_state_put(state);
9856         if (ret)
9857                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9858
9859         return ret;
9860 }
9861
9862 /*
9863  * This function handles all cases when a set mode does not come upon hotplug.
9864  * This includes when a display is unplugged then plugged back into the
9865  * same port and when running without usermode desktop manager support.
9866  */
9867 void dm_restore_drm_connector_state(struct drm_device *dev,
9868                                     struct drm_connector *connector)
9869 {
9870         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9871         struct amdgpu_crtc *disconnected_acrtc;
9872         struct dm_crtc_state *acrtc_state;
9873
9874         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9875                 return;
9876
9877         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9878         if (!disconnected_acrtc)
9879                 return;
9880
9881         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9882         if (!acrtc_state->stream)
9883                 return;
9884
9885         /*
9886          * If the previous sink is not released and differs from the current
9887          * one, we deduce that we cannot rely on a usermode call to turn on
9888          * the display, so we do it here.
9889          */
9890         if (acrtc_state->stream->sink != aconnector->dc_sink)
9891                 dm_force_atomic_commit(&aconnector->base);
9892 }
9893
9894 /*
9895  * Grab all modesetting locks to serialize against any blocking commits,
9896  * and wait for completion of all non-blocking commits.
9897  */
9898 static int do_aquire_global_lock(struct drm_device *dev,
9899                                  struct drm_atomic_state *state)
9900 {
9901         struct drm_crtc *crtc;
9902         struct drm_crtc_commit *commit;
9903         long ret;
9904
9905         /*
9906          * Adding all modeset locks to acquire_ctx ensures
9907          * that when the framework releases it, the extra
9908          * locks we take here are released too.
9909          */
9910         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9911         if (ret)
9912                 return ret;
9913
9914         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9915                 spin_lock(&crtc->commit_lock);
9916                 commit = list_first_entry_or_null(&crtc->commit_list,
9917                                 struct drm_crtc_commit, commit_entry);
9918                 if (commit)
9919                         drm_crtc_commit_get(commit);
9920                 spin_unlock(&crtc->commit_lock);
9921
9922                 if (!commit)
9923                         continue;
9924
9925                 /*
9926                  * Make sure all pending HW programming has completed and
9927                  * all page flips are done.
9928                  */
9929                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9930
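                /*
                 * wait_for_completion_interruptible_timeout() returns the
                 * remaining jiffies (> 0) on completion, 0 on timeout and
                 * -ERESTARTSYS if interrupted; the checks below rely on this.
                 */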
9931                 if (ret > 0)
9932                         ret = wait_for_completion_interruptible_timeout(
9933                                         &commit->flip_done, 10*HZ);
9934
9935                 if (ret == 0)
9936                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9937                                   crtc->base.id, crtc->name);
9938
9939                 drm_crtc_commit_put(commit);
9940         }
9941
9942         return ret < 0 ? ret : 0;
9943 }
9944
9945 static void get_freesync_config_for_crtc(
9946         struct dm_crtc_state *new_crtc_state,
9947         struct dm_connector_state *new_con_state)
9948 {
9949         struct mod_freesync_config config = {0};
9950         struct amdgpu_dm_connector *aconnector =
9951                         to_amdgpu_dm_connector(new_con_state->base.connector);
9952         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9953         int vrefresh = drm_mode_vrefresh(mode);
9954         bool fs_vid_mode = false;
9955
9956         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9957                                         vrefresh >= aconnector->min_vfreq &&
9958                                         vrefresh <= aconnector->max_vfreq;
9959
9960         if (new_crtc_state->vrr_supported) {
9961                 new_crtc_state->stream->ignore_msa_timing_param = true;
9962                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9963
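                /*
                 * The mod_freesync config expects refresh rates in micro-Hz;
                 * the connector caps are in Hz, hence the conversions below
                 * (e.g. a 48 Hz minimum becomes 48,000,000 uHz).
                 */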
9964                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9965                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9966                 config.vsif_supported = true;
9967                 config.btr = true;
9968
9969                 if (fs_vid_mode) {
9970                         config.state = VRR_STATE_ACTIVE_FIXED;
9971                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9972                         goto out;
9973                 } else if (new_crtc_state->base.vrr_enabled) {
9974                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9975                 } else {
9976                         config.state = VRR_STATE_INACTIVE;
9977                 }
9978         }
9979 out:
9980         new_crtc_state->freesync_config = config;
9981 }
9982
9983 static void reset_freesync_config_for_crtc(
9984         struct dm_crtc_state *new_crtc_state)
9985 {
9986         new_crtc_state->vrr_supported = false;
9987
9988         memset(&new_crtc_state->vrr_infopacket, 0,
9989                sizeof(new_crtc_state->vrr_infopacket));
9990 }
9991
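/*
 * For FreeSync video modes, only the vertical front porch is expected to
 * differ between modes. Hence the inverted (!=) checks on vtotal, vsync_start
 * and vsync_end below: those vertical timings must change while the vsync
 * pulse width (vsync_end - vsync_start) and all horizontal timings stay equal.
 */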
9992 static bool
9993 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9994                                  struct drm_crtc_state *new_crtc_state)
9995 {
9996         struct drm_display_mode old_mode, new_mode;
9997
9998         if (!old_crtc_state || !new_crtc_state)
9999                 return false;
10000
10001         old_mode = old_crtc_state->mode;
10002         new_mode = new_crtc_state->mode;
10003
10004         if (old_mode.clock       == new_mode.clock &&
10005             old_mode.hdisplay    == new_mode.hdisplay &&
10006             old_mode.vdisplay    == new_mode.vdisplay &&
10007             old_mode.htotal      == new_mode.htotal &&
10008             old_mode.vtotal      != new_mode.vtotal &&
10009             old_mode.hsync_start == new_mode.hsync_start &&
10010             old_mode.vsync_start != new_mode.vsync_start &&
10011             old_mode.hsync_end   == new_mode.hsync_end &&
10012             old_mode.vsync_end   != new_mode.vsync_end &&
10013             old_mode.hskew       == new_mode.hskew &&
10014             old_mode.vscan       == new_mode.vscan &&
10015             (old_mode.vsync_end - old_mode.vsync_start) ==
10016             (new_mode.vsync_end - new_mode.vsync_start))
10017                 return true;
10018
10019         return false;
10020 }
10021
10022 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10023         uint64_t num, den, res;
10024         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10025
10026         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10027
10028         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10029         den = (unsigned long long)new_crtc_state->mode.htotal *
10030               (unsigned long long)new_crtc_state->mode.vtotal;
10031
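        /*
         * Worked example, assuming a standard 1920x1080@60 CEA mode:
         * clock = 148500 kHz, htotal = 2200, vtotal = 1125, so
         * res = 148500 * 1000 * 1000000 / (2200 * 1125) = 60,000,000 uHz,
         * i.e. a fixed refresh rate of 60 Hz.
         */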
10032         res = div_u64(num, den);
10033         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10034 }
10035
10036 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10037                                 struct drm_atomic_state *state,
10038                                 struct drm_crtc *crtc,
10039                                 struct drm_crtc_state *old_crtc_state,
10040                                 struct drm_crtc_state *new_crtc_state,
10041                                 bool enable,
10042                                 bool *lock_and_validation_needed)
10043 {
10044         struct dm_atomic_state *dm_state = NULL;
10045         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10046         struct dc_stream_state *new_stream;
10047         int ret = 0;
10048
10049         /*
10050          * TODO: Move this code, which updates the changed items, into
10051          * dm_crtc_atomic_check once we get rid of dc_validation_set.
10052          */
10053         struct amdgpu_crtc *acrtc = NULL;
10054         struct amdgpu_dm_connector *aconnector = NULL;
10055         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10056         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10057
10058         new_stream = NULL;
10059
10060         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10061         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10062         acrtc = to_amdgpu_crtc(crtc);
10063         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10064
10065         /* TODO This hack should go away */
10066         if (aconnector && enable) {
10067                 /* Make sure fake sink is created in plug-in scenario */
10068                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10069                                                             &aconnector->base);
10070                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10071                                                             &aconnector->base);
10072
10073                 if (IS_ERR(drm_new_conn_state)) {
10074                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10075                         goto fail;
10076                 }
10077
10078                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10079                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10080
10081                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10082                         goto skip_modeset;
10083
10084                 new_stream = create_validate_stream_for_sink(aconnector,
10085                                                              &new_crtc_state->mode,
10086                                                              dm_new_conn_state,
10087                                                              dm_old_crtc_state->stream);
10088
10089                 /*
10090                  * We can have no stream on ACTION_SET if a display
10091                  * was disconnected during S3; in this case it is not an
10092                  * error, the OS will be updated after detection and
10093                  * will do the right thing on the next atomic commit.
10094                  */
10095
10096                 if (!new_stream) {
10097                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10098                                         __func__, acrtc->base.base.id);
10099                         ret = -ENOMEM;
10100                         goto fail;
10101                 }
10102
10103                 /*
10104                  * TODO: Check VSDB bits to decide whether this should
10105                  * be enabled or not.
10106                  */
10107                 new_stream->triggered_crtc_reset.enabled =
10108                         dm->force_timing_sync;
10109
10110                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10111
10112                 ret = fill_hdr_info_packet(drm_new_conn_state,
10113                                            &new_stream->hdr_static_metadata);
10114                 if (ret)
10115                         goto fail;
10116
10117                 /*
10118                  * If we already removed the old stream from the context
10119                  * (and set the new stream to NULL) then we can't reuse
10120                  * the old stream even if the stream and scaling are unchanged.
10121                  * We'll hit the BUG_ON and black screen.
10122                  *
10123                  * TODO: Refactor this function to allow this check to work
10124                  * in all conditions.
10125                  */
10126                 if (amdgpu_freesync_vid_mode &&
10127                     dm_new_crtc_state->stream &&
10128                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10129                         goto skip_modeset;
10130
10131                 if (dm_new_crtc_state->stream &&
10132                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10133                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10134                         new_crtc_state->mode_changed = false;
10135                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10136                                          new_crtc_state->mode_changed);
10137                 }
10138         }
10139
10140         /* mode_changed flag may get updated above, need to check again */
10141         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10142                 goto skip_modeset;
10143
10144         DRM_DEBUG_ATOMIC(
10145                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10146                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10147                 "connectors_changed:%d\n",
10148                 acrtc->crtc_id,
10149                 new_crtc_state->enable,
10150                 new_crtc_state->active,
10151                 new_crtc_state->planes_changed,
10152                 new_crtc_state->mode_changed,
10153                 new_crtc_state->active_changed,
10154                 new_crtc_state->connectors_changed);
10155
10156         /* Remove stream for any changed/disabled CRTC */
10157         if (!enable) {
10158
10159                 if (!dm_old_crtc_state->stream)
10160                         goto skip_modeset;
10161
10162                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10163                     is_timing_unchanged_for_freesync(new_crtc_state,
10164                                                      old_crtc_state)) {
10165                         new_crtc_state->mode_changed = false;
10166                         DRM_DEBUG_DRIVER(
10167                                 "Mode change not required for front porch change, "
10168                                 "setting mode_changed to %d\n",
10169                                 new_crtc_state->mode_changed);
10170
10171                         set_freesync_fixed_config(dm_new_crtc_state);
10172
10173                         goto skip_modeset;
10174                 } else if (amdgpu_freesync_vid_mode && aconnector &&
10175                            is_freesync_video_mode(&new_crtc_state->mode,
10176                                                   aconnector)) {
10177                         struct drm_display_mode *high_mode;
10178
10179                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10180                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10181                                 set_freesync_fixed_config(dm_new_crtc_state);
10182                         }
10183                 }
10184
10185                 ret = dm_atomic_get_state(state, &dm_state);
10186                 if (ret)
10187                         goto fail;
10188
10189                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10190                                 crtc->base.id);
10191
10192                 /* i.e. reset mode */
10193                 if (dc_remove_stream_from_ctx(
10194                                 dm->dc,
10195                                 dm_state->context,
10196                                 dm_old_crtc_state->stream) != DC_OK) {
10197                         ret = -EINVAL;
10198                         goto fail;
10199                 }
10200
10201                 dc_stream_release(dm_old_crtc_state->stream);
10202                 dm_new_crtc_state->stream = NULL;
10203
10204                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10205
10206                 *lock_and_validation_needed = true;
10207
10208         } else {/* Add stream for any updated/enabled CRTC */
10209                 /*
10210                  * Quick fix to prevent a NULL pointer dereference on new_stream
10211                  * when newly added MST connectors are not found in the existing
10212                  * crtc_state in chained mode. TODO: dig out the root cause.
10213                  */
10214                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10215                         goto skip_modeset;
10216
10217                 if (modereset_required(new_crtc_state))
10218                         goto skip_modeset;
10219
10220                 if (modeset_required(new_crtc_state, new_stream,
10221                                      dm_old_crtc_state->stream)) {
10222
10223                         WARN_ON(dm_new_crtc_state->stream);
10224
10225                         ret = dm_atomic_get_state(state, &dm_state);
10226                         if (ret)
10227                                 goto fail;
10228
10229                         dm_new_crtc_state->stream = new_stream;
10230
10231                         dc_stream_retain(new_stream);
10232
10233                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10234                                          crtc->base.id);
10235
10236                         if (dc_add_stream_to_ctx(
10237                                         dm->dc,
10238                                         dm_state->context,
10239                                         dm_new_crtc_state->stream) != DC_OK) {
10240                                 ret = -EINVAL;
10241                                 goto fail;
10242                         }
10243
10244                         *lock_and_validation_needed = true;
10245                 }
10246         }
10247
10248 skip_modeset:
10249         /* Release extra reference */
10250         if (new_stream)
10251                 dc_stream_release(new_stream);
10252
10253         /*
10254          * We want to do dc stream updates that do not require a
10255          * full modeset below.
10256          */
10257         if (!(enable && aconnector && new_crtc_state->active))
10258                 return 0;
10259         /*
10260          * Given the above conditions, the dc state cannot be NULL because:
10261          * 1. the CRTC is being enabled (its stream was just added to the
10262          *    dc context, or is already on it),
10263          * 2. it has a valid connector attached, and
10264          * 3. it is currently active and enabled.
10265          * => The dc stream state currently exists.
10266          */
10267         BUG_ON(dm_new_crtc_state->stream == NULL);
10268
10269         /* Scaling or underscan settings */
10270         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10271                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10272                 update_stream_scaling_settings(
10273                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10274
10275         /* ABM settings */
10276         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10277
10278         /*
10279          * Color management settings. We also update color properties
10280          * when a modeset is needed, to ensure it gets reprogrammed.
10281          */
10282         if (dm_new_crtc_state->base.color_mgmt_changed ||
10283             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10284                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10285                 if (ret)
10286                         goto fail;
10287         }
10288
10289         /* Update Freesync settings. */
10290         get_freesync_config_for_crtc(dm_new_crtc_state,
10291                                      dm_new_conn_state);
10292
10293         return ret;
10294
10295 fail:
10296         if (new_stream)
10297                 dc_stream_release(new_stream);
10298         return ret;
10299 }
10300
10301 static bool should_reset_plane(struct drm_atomic_state *state,
10302                                struct drm_plane *plane,
10303                                struct drm_plane_state *old_plane_state,
10304                                struct drm_plane_state *new_plane_state)
10305 {
10306         struct drm_plane *other;
10307         struct drm_plane_state *old_other_state, *new_other_state;
10308         struct drm_crtc_state *new_crtc_state;
10309         int i;
10310
10311         /*
10312          * TODO: Remove this hack once the checks below are sufficient
10313          * to determine when we need to reset all the planes on the
10314          * stream.
10315          */
10316         if (state->allow_modeset)
10317                 return true;
10318
10319         /* Exit early if we know that we're adding or removing the plane. */
10320         if (old_plane_state->crtc != new_plane_state->crtc)
10321                 return true;
10322
10323         /* old crtc == new_crtc == NULL, plane not in context. */
10324         if (!new_plane_state->crtc)
10325                 return false;
10326
10327         new_crtc_state =
10328                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10329
10330         if (!new_crtc_state)
10331                 return true;
10332
10333         /* CRTC Degamma changes currently require us to recreate planes. */
10334         if (new_crtc_state->color_mgmt_changed)
10335                 return true;
10336
10337         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10338                 return true;
10339
10340         /*
10341          * If there are any new primary or overlay planes being added or
10342          * removed then the z-order can potentially change. To ensure
10343          * correct z-order and pipe acquisition the current DC architecture
10344          * requires us to remove and recreate all existing planes.
10345          *
10346          * TODO: Come up with a more elegant solution for this.
10347          */
10348         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10349                 struct amdgpu_framebuffer *old_afb, *new_afb;
10350                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10351                         continue;
10352
10353                 if (old_other_state->crtc != new_plane_state->crtc &&
10354                     new_other_state->crtc != new_plane_state->crtc)
10355                         continue;
10356
10357                 if (old_other_state->crtc != new_other_state->crtc)
10358                         return true;
10359
10360                 /* Src/dst size and scaling updates. */
10361                 if (old_other_state->src_w != new_other_state->src_w ||
10362                     old_other_state->src_h != new_other_state->src_h ||
10363                     old_other_state->crtc_w != new_other_state->crtc_w ||
10364                     old_other_state->crtc_h != new_other_state->crtc_h)
10365                         return true;
10366
10367                 /* Rotation / mirroring updates. */
10368                 if (old_other_state->rotation != new_other_state->rotation)
10369                         return true;
10370
10371                 /* Blending updates. */
10372                 if (old_other_state->pixel_blend_mode !=
10373                     new_other_state->pixel_blend_mode)
10374                         return true;
10375
10376                 /* Alpha updates. */
10377                 if (old_other_state->alpha != new_other_state->alpha)
10378                         return true;
10379
10380                 /* Colorspace changes. */
10381                 if (old_other_state->color_range != new_other_state->color_range ||
10382                     old_other_state->color_encoding != new_other_state->color_encoding)
10383                         return true;
10384
10385                 /* Framebuffer checks fall at the end. */
10386                 if (!old_other_state->fb || !new_other_state->fb)
10387                         continue;
10388
10389                 /* Pixel format changes can require bandwidth updates. */
10390                 if (old_other_state->fb->format != new_other_state->fb->format)
10391                         return true;
10392
10393                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10394                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10395
10396                 /* Tiling and DCC changes also require bandwidth updates. */
10397                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10398                     old_afb->base.modifier != new_afb->base.modifier)
10399                         return true;
10400         }
10401
10402         return false;
10403 }
10404
10405 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10406                               struct drm_plane_state *new_plane_state,
10407                               struct drm_framebuffer *fb)
10408 {
10409         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10410         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10411         unsigned int pitch;
10412         bool linear;
10413
10414         if (fb->width > new_acrtc->max_cursor_width ||
10415             fb->height > new_acrtc->max_cursor_height) {
10416                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10417                                  new_plane_state->fb->width,
10418                                  new_plane_state->fb->height);
10419                 return -EINVAL;
10420         }
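        /*
         * Plane src_w/src_h are in 16.16 fixed point, hence the integer FB
         * dimensions are shifted left by 16 in the comparison below.
         */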
10421         if (new_plane_state->src_w != fb->width << 16 ||
10422             new_plane_state->src_h != fb->height << 16) {
10423                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10424                 return -EINVAL;
10425         }
10426
10427         /* Pitch in pixels */
10428         pitch = fb->pitches[0] / fb->format->cpp[0];
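        /*
         * For example (hypothetical values): a 64x64 ARGB8888 cursor FB has
         * pitches[0] == 256 bytes and cpp[0] == 4, giving a pitch of 64 px.
         */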
10429
10430         if (fb->width != pitch) {
10431                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10432                                  fb->width, pitch);
10433                 return -EINVAL;
10434         }
10435
10436         switch (pitch) {
10437         case 64:
10438         case 128:
10439         case 256:
10440                 /* FB pitch is supported by cursor plane */
10441                 break;
10442         default:
10443                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10444                 return -EINVAL;
10445         }
10446
10447         /* Core DRM takes care of checking FB modifiers, so we only need to
10448          * check tiling flags when the FB doesn't have a modifier. */
10449         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10450                 if (adev->family < AMDGPU_FAMILY_AI) {
10451                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10452                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10453                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10454                 } else {
10455                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10456                 }
10457                 if (!linear) {
10458                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10459                         return -EINVAL;
10460                 }
10461         }
10462
10463         return 0;
10464 }
10465
10466 static int dm_update_plane_state(struct dc *dc,
10467                                  struct drm_atomic_state *state,
10468                                  struct drm_plane *plane,
10469                                  struct drm_plane_state *old_plane_state,
10470                                  struct drm_plane_state *new_plane_state,
10471                                  bool enable,
10472                                  bool *lock_and_validation_needed)
10473 {
10474
10475         struct dm_atomic_state *dm_state = NULL;
10476         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10477         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10478         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10479         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10480         struct amdgpu_crtc *new_acrtc;
10481         bool needs_reset;
10482         int ret = 0;
10483
10484
10485         new_plane_crtc = new_plane_state->crtc;
10486         old_plane_crtc = old_plane_state->crtc;
10487         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10488         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10489
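              /*
               * Cursor planes are not backed by DC plane states in this driver;
               * the cursor is programmed through the CRTC instead, so only basic
               * FB and position checks are done here before returning early.
               */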
10490         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10491                 if (!enable || !new_plane_crtc ||
10492                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10493                         return 0;
10494
10495                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10496
10497                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10498                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10499                         return -EINVAL;
10500                 }
10501
10502                 if (new_plane_state->fb) {
10503                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10504                                                  new_plane_state->fb);
10505                         if (ret)
10506                                 return ret;
10507                 }
10508
10509                 return 0;
10510         }
10511
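              /*
               * A "reset" here means the plane cannot be fast-updated in place:
               * it must be removed from the DC context (on disable) or added to
               * it with fresh state (on enable).
               */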
10512         needs_reset = should_reset_plane(state, plane, old_plane_state,
10513                                          new_plane_state);
10514
10515         /* Remove any changed/removed planes */
10516         if (!enable) {
10517                 if (!needs_reset)
10518                         return 0;
10519
10520                 if (!old_plane_crtc)
10521                         return 0;
10522
10523                 old_crtc_state = drm_atomic_get_old_crtc_state(
10524                                 state, old_plane_crtc);
10525                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10526
10527                 if (!dm_old_crtc_state->stream)
10528                         return 0;
10529
10530                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10531                                 plane->base.id, old_plane_crtc->base.id);
10532
10533                 ret = dm_atomic_get_state(state, &dm_state);
10534                 if (ret)
10535                         return ret;
10536
10537                 if (!dc_remove_plane_from_context(
10538                                 dc,
10539                                 dm_old_crtc_state->stream,
10540                                 dm_old_plane_state->dc_state,
10541                                 dm_state->context)) {
10542
10543                         return -EINVAL;
10544                 }
10545
10546
10547                 dc_plane_state_release(dm_old_plane_state->dc_state);
10548                 dm_new_plane_state->dc_state = NULL;
10549
10550                 *lock_and_validation_needed = true;
10551
10552         } else { /* Add new planes */
10553                 struct dc_plane_state *dc_new_plane_state;
10554
10555                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10556                         return 0;
10557
10558                 if (!new_plane_crtc)
10559                         return 0;
10560
10561                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10562                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10563
10564                 if (!dm_new_crtc_state->stream)
10565                         return 0;
10566
10567                 if (!needs_reset)
10568                         return 0;
10569
10570                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10571                 if (ret)
10572                         return ret;
10573
10574                 WARN_ON(dm_new_plane_state->dc_state);
10575
10576                 dc_new_plane_state = dc_create_plane_state(dc);
10577                 if (!dc_new_plane_state)
10578                         return -ENOMEM;
10579
10580                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10581                                  plane->base.id, new_plane_crtc->base.id);
10582
10583                 ret = fill_dc_plane_attributes(
10584                         drm_to_adev(new_plane_crtc->dev),
10585                         dc_new_plane_state,
10586                         new_plane_state,
10587                         new_crtc_state);
10588                 if (ret) {
10589                         dc_plane_state_release(dc_new_plane_state);
10590                         return ret;
10591                 }
10592
10593                 ret = dm_atomic_get_state(state, &dm_state);
10594                 if (ret) {
10595                         dc_plane_state_release(dc_new_plane_state);
10596                         return ret;
10597                 }
10598
10599                 /*
10600                  * Any atomic check errors that occur after this will
10601                  * not need a release. The plane state will be attached
10602                  * to the stream, and therefore part of the atomic
10603                  * state. It'll be released when the atomic state is
10604                  * cleaned.
10605                  */
10606                 if (!dc_add_plane_to_context(
10607                                 dc,
10608                                 dm_new_crtc_state->stream,
10609                                 dc_new_plane_state,
10610                                 dm_state->context)) {
10611
10612                         dc_plane_state_release(dc_new_plane_state);
10613                         return -EINVAL;
10614                 }
10615
10616                 dm_new_plane_state->dc_state = dc_new_plane_state;
10617
10618                 /* Tell DC to do a full surface update every time there
10619                  * is a plane change. Inefficient, but works for now.
10620                  */
10621                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10622
10623                 *lock_and_validation_needed = true;
10624         }
10625
10626
10627         return ret;
10628 }
10629
10630 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10631                                 struct drm_crtc *crtc,
10632                                 struct drm_crtc_state *new_crtc_state)
10633 {
10634         struct drm_plane *cursor = crtc->cursor, *underlying;
10635         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10636         int i;
10637         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10638
10639         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10640          * cursor per pipe but it's going to inherit the scaling and
10641          * positioning from the underlying pipe. Check that the cursor plane's
10642          * scaling matches the underlying planes'. */
10643
10644         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10645         if (!new_cursor_state || !new_cursor_state->fb) {
10646                 return 0;
10647         }
10648
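              /*
               * Plane src coordinates are 16.16 fixed point, so src_w >> 16 is
               * the source width in whole pixels. Scaling ratios are compared in
               * thousandths to avoid floating point: e.g. a 64x64 cursor source
               * shown in a 128x128 destination yields a scale of 2000 (2.0x).
               */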
10649         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10650                          (new_cursor_state->src_w >> 16);
10651         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10652                          (new_cursor_state->src_h >> 16);
10653
10654         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10655                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10656                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10657                         continue;
10658
10659                 /* Ignore disabled planes */
10660                 if (!new_underlying_state->fb)
10661                         continue;
10662
10663                 underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10664                                      (new_underlying_state->src_w >> 16);
10665                 underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10666                                      (new_underlying_state->src_h >> 16);
10667
10668                 if (cursor_scale_w != underlying_scale_w ||
10669                     cursor_scale_h != underlying_scale_h) {
10670                         drm_dbg_atomic(crtc->dev,
10671                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10672                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10673                         return -EINVAL;
10674                 }
10675
10676                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10677                 if (new_underlying_state->crtc_x <= 0 &&
10678                     new_underlying_state->crtc_y <= 0 &&
10679                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10680                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10681                         break;
10682         }
10683
10684         return 0;
10685 }
10686
10687 #if defined(CONFIG_DRM_AMD_DC_DCN)
10688 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10689 {
10690         struct drm_connector *connector;
10691         struct drm_connector_state *conn_state;
10692         struct amdgpu_dm_connector *aconnector = NULL;
10693         int i;
10694         for_each_new_connector_in_state(state, connector, conn_state, i) {
10695                 if (conn_state->crtc != crtc)
10696                         continue;
10697
10698                 aconnector = to_amdgpu_dm_connector(connector);
10699                 if (!aconnector->port || !aconnector->mst_port)
10700                         aconnector = NULL;
10701                 else
10702                         break;
10703         }
10704
10705         if (!aconnector)
10706                 return 0;
10707
10708         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10709 }
10710 #endif
10711
10712 /**
10713  * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
10714  * @dev: The DRM device
10715  * @state: The atomic state to commit
10716  *
10717  * Validate that the given atomic state is programmable by DC into hardware.
10718  * This involves constructing a &struct dc_state reflecting the new hardware
10719  * state we wish to commit, then querying DC to see if it is programmable. It's
10720  * important not to modify the existing DC state. Otherwise, atomic_check
10721  * may unexpectedly commit hardware changes.
10722  *
10723  * When validating the DC state, it's important that the right locks are
10724  * acquired. For a full update, which removes/adds/updates streams on one
10725  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10726  * that any such full update commit waits for the completion of any
10727  * outstanding flips, using DRM's synchronization events.
10728  *
10729  * Note that DM adds the affected connectors for all CRTCs in the state, even
10730  * when that might not seem necessary. This is because DC stream creation
10731  * requires the DC sink, which is tied to the DRM connector state. Cleaning
10732  * this up should be possible, but it is non-trivial - a possible TODO item.
10733  *
10734  * Return: 0 on success, or a negative error code if validation failed.
10735  */
10736 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10737                                   struct drm_atomic_state *state)
10738 {
10739         struct amdgpu_device *adev = drm_to_adev(dev);
10740         struct dm_atomic_state *dm_state = NULL;
10741         struct dc *dc = adev->dm.dc;
10742         struct drm_connector *connector;
10743         struct drm_connector_state *old_con_state, *new_con_state;
10744         struct drm_crtc *crtc;
10745         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10746         struct drm_plane *plane;
10747         struct drm_plane_state *old_plane_state, *new_plane_state;
10748         enum dc_status status;
10749         int ret, i;
10750         bool lock_and_validation_needed = false;
10751         struct dm_crtc_state *dm_old_crtc_state;
10752 #if defined(CONFIG_DRM_AMD_DC_DCN)
10753         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10754         struct drm_dp_mst_topology_state *mst_state;
10755         struct drm_dp_mst_topology_mgr *mgr;
10756 #endif
10757
10758         trace_amdgpu_dm_atomic_check_begin(state);
10759
10760         ret = drm_atomic_helper_check_modeset(dev, state);
10761         if (ret)
10762                 goto fail;
10763
10764         /* Check connector changes */
10765         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10766                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10767                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10768
10769                 /* Skip connectors that are disabled or already part of a modeset. */
10770                 if (!old_con_state->crtc && !new_con_state->crtc)
10771                         continue;
10772
10773                 if (!new_con_state->crtc)
10774                         continue;
10775
10776                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10777                 if (IS_ERR(new_crtc_state)) {
10778                         ret = PTR_ERR(new_crtc_state);
10779                         goto fail;
10780                 }
10781
10782                 if (dm_old_con_state->abm_level !=
10783                     dm_new_con_state->abm_level)
10784                         new_crtc_state->connectors_changed = true;
10785         }
10786
10787 #if defined(CONFIG_DRM_AMD_DC_DCN)
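              /*
               * When DSC is supported, a modeset on one MST CRTC can change the
               * DSC configuration of every stream sharing the same MST link, so
               * all potentially affected CRTCs are pulled into the atomic state.
               */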
10788         if (dc_resource_is_dsc_encoding_supported(dc)) {
10789                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10790                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10791                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10792                                 if (ret)
10793                                         goto fail;
10794                         }
10795                 }
10796         }
10797 #endif
10798         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10799                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10800
10801                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10802                     !new_crtc_state->color_mgmt_changed &&
10803                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10804                     !dm_old_crtc_state->dsc_force_changed)
10805                         continue;
10806
10807                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10808                 if (ret)
10809                         goto fail;
10810
10811                 if (!new_crtc_state->enable)
10812                         continue;
10813
10814                 ret = drm_atomic_add_affected_connectors(state, crtc);
10815                 if (ret)
10816                         goto fail;
10817
10818                 ret = drm_atomic_add_affected_planes(state, crtc);
10819                 if (ret)
10820                         goto fail;
10821
10822                 if (dm_old_crtc_state->dsc_force_changed)
10823                         new_crtc_state->mode_changed = true;
10824         }
10825
10826         /*
10827          * Add all primary and overlay planes on the CRTC to the state
10828          * whenever a plane is enabled to maintain correct z-ordering
10829          * and to enable fast surface updates.
10830          */
10831         drm_for_each_crtc(crtc, dev) {
10832                 bool modified = false;
10833
10834                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10835                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10836                                 continue;
10837
10838                         if (new_plane_state->crtc == crtc ||
10839                             old_plane_state->crtc == crtc) {
10840                                 modified = true;
10841                                 break;
10842                         }
10843                 }
10844
10845                 if (!modified)
10846                         continue;
10847
10848                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10849                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10850                                 continue;
10851
10852                         new_plane_state =
10853                                 drm_atomic_get_plane_state(state, plane);
10854
10855                         if (IS_ERR(new_plane_state)) {
10856                                 ret = PTR_ERR(new_plane_state);
10857                                 goto fail;
10858                         }
10859                 }
10860         }
10861
10862         /* Remove existing planes if they are modified */
10863         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10864                 ret = dm_update_plane_state(dc, state, plane,
10865                                             old_plane_state,
10866                                             new_plane_state,
10867                                             false,
10868                                             &lock_and_validation_needed);
10869                 if (ret)
10870                         goto fail;
10871         }
10872
10873         /* Disable all crtcs which require disable */
10874         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10875                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10876                                            old_crtc_state,
10877                                            new_crtc_state,
10878                                            false,
10879                                            &lock_and_validation_needed);
10880                 if (ret)
10881                         goto fail;
10882         }
10883
10884         /* Enable all crtcs which require enable */
10885         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10886                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10887                                            old_crtc_state,
10888                                            new_crtc_state,
10889                                            true,
10890                                            &lock_and_validation_needed);
10891                 if (ret)
10892                         goto fail;
10893         }
10894
10895         /* Add new/modified planes */
10896         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10897                 ret = dm_update_plane_state(dc, state, plane,
10898                                             old_plane_state,
10899                                             new_plane_state,
10900                                             true,
10901                                             &lock_and_validation_needed);
10902                 if (ret)
10903                         goto fail;
10904         }
10905
10906         /* Run this here since we want to validate the streams we created */
10907         ret = drm_atomic_helper_check_planes(dev, state);
10908         if (ret)
10909                 goto fail;
10910
10911         /* Check cursor planes scaling */
10912         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10913                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10914                 if (ret)
10915                         goto fail;
10916         }
10917
10918         if (state->legacy_cursor_update) {
10919                 /*
10920                  * This is a fast cursor update coming from the plane update
10921                  * helper, check if it can be done asynchronously for better
10922                  * performance.
10923                  */
10924                 state->async_update =
10925                         !drm_atomic_helper_async_check(dev, state);
10926
10927                 /*
10928                  * Skip the remaining global validation if this is an async
10929                  * update. Cursor updates can be done without affecting
10930                  * state or bandwidth calcs and this avoids the performance
10931                  * penalty of locking the private state object and
10932                  * allocating a new dc_state.
10933                  */
10934                 if (state->async_update)
10935                         return 0;
10936         }
10937
10938         /* Check scaling and underscan changes */
10939         /* TODO: Scaling-change validation was removed because a new stream
10940          * cannot be committed into the context without causing a full reset.
10941          * How to handle this still needs to be decided.
10942          */
10943         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10944                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10945                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10946                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10947
10948                 /* Skip any modesets/resets */
10949                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10950                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10951                         continue;
10952
10953                 /* Skip anything that is not a scaling or underscan change */
10954                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10955                         continue;
10956
10957                 lock_and_validation_needed = true;
10958         }
10959
10960 #if defined(CONFIG_DRM_AMD_DC_DCN)
10961         /* set the slot info for each mst_state based on the link encoding format */
10962         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10963                 struct amdgpu_dm_connector *aconnector;
10964                 struct drm_connector *connector;
10965                 struct drm_connector_list_iter iter;
10966                 u8 link_coding_cap;
10967
10968                 if (!mgr->mst_state)
10969                         continue;
10970
10971                 drm_connector_list_iter_begin(dev, &iter);
10972                 drm_for_each_connector_iter(connector, &iter) {
10973                         int id = connector->index;
10974
10975                         if (id == mst_state->mgr->conn_base_id) {
10976                                 aconnector = to_amdgpu_dm_connector(connector);
10977                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10978                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
10979
10980                                 break;
10981                         }
10982                 }
10983                 drm_connector_list_iter_end(&iter);
10984
10985         }
10986 #endif
10987         /*
10988          * Streams and planes are reset when there are changes that affect
10989          * bandwidth. Anything that affects bandwidth needs to go through
10990          * DC global validation to ensure that the configuration can be applied
10991          * to hardware.
10992          *
10993          * We have to currently stall out here in atomic_check for outstanding
10994          * commits to finish in this case because our IRQ handlers reference
10995          * DRM state directly - we can end up disabling interrupts too early
10996          * if we don't.
10997          *
10998          * TODO: Remove this stall and drop DM state private objects.
10999          */
11000         if (lock_and_validation_needed) {
11001                 ret = dm_atomic_get_state(state, &dm_state);
11002                 if (ret)
11003                         goto fail;
11004
11005                 ret = do_aquire_global_lock(dev, state);
11006                 if (ret)
11007                         goto fail;
11008
11009 #if defined(CONFIG_DRM_AMD_DC_DCN)
11010                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11011                         ret = -EINVAL;
                              goto fail;
                      }
11012
11013                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11014                 if (ret)
11015                         goto fail;
11016 #endif
11017
11018                 /*
11019                  * Perform validation of MST topology in the state:
11020                  * We need to perform MST atomic check before calling
11021                  * dc_validate_global_state(), or there is a chance
11022                  * to get stuck in an infinite loop and hang eventually.
11023                  */
11024                 ret = drm_dp_mst_atomic_check(state);
11025                 if (ret)
11026                         goto fail;
11027                 status = dc_validate_global_state(dc, dm_state->context, false);
11028                 if (status != DC_OK) {
11029                         drm_dbg_atomic(dev,
11030                                        "DC global validation failure: %s (%d)",
11031                                        dc_status_to_str(status), status);
11032                         ret = -EINVAL;
11033                         goto fail;
11034                 }
11035         } else {
11036                 /*
11037                  * The commit is a fast update. Fast updates shouldn't change
11038                  * the DC context, affect global validation, and can have their
11039                  * commit work done in parallel with other commits not touching
11040                  * the same resource. If we have a new DC context as part of
11041                  * the DM atomic state from validation we need to free it and
11042                  * retain the existing one instead.
11043                  *
11044                  * Furthermore, since the DM atomic state only contains the DC
11045                  * context and can safely be annulled, we can free the state
11046                  * and clear the associated private object now to free
11047                  * some memory and avoid a possible use-after-free later.
11048                  */
11049
11050                 for (i = 0; i < state->num_private_objs; i++) {
11051                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11052
11053                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11054                                 int j = state->num_private_objs-1;
11055
11056                                 dm_atomic_destroy_state(obj,
11057                                                 state->private_objs[i].state);
11058
11059                                 /* If i is not at the end of the array then the
11060                                  * last element needs to be moved to where i was
11061                                  * before the array can safely be truncated.
11062                                  */
11063                                 if (i != j)
11064                                         state->private_objs[i] =
11065                                                 state->private_objs[j];
11066
11067                                 state->private_objs[j].ptr = NULL;
11068                                 state->private_objs[j].state = NULL;
11069                                 state->private_objs[j].old_state = NULL;
11070                                 state->private_objs[j].new_state = NULL;
11071
11072                                 state->num_private_objs = j;
11073                                 break;
11074                         }
11075                 }
11076         }
11077
11078         /* Store the overall update type for use later in atomic check. */
11079         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11080                 struct dm_crtc_state *dm_new_crtc_state =
11081                         to_dm_crtc_state(new_crtc_state);
11082
11083                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11084                                                          UPDATE_TYPE_FULL :
11085                                                          UPDATE_TYPE_FAST;
11086         }
11087
11088         /* ret must be 0 (success) at this point */
11089         WARN_ON(ret);
11090
11091         trace_amdgpu_dm_atomic_check_finish(state, ret);
11092
11093         return ret;
11094
11095 fail:
11096         if (ret == -EDEADLK)
11097                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11098         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11099                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11100         else
11101                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11102
11103         trace_amdgpu_dm_atomic_check_finish(state, ret);
11104
11105         return ret;
11106 }
11107
11108 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11109                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11110 {
11111         uint8_t dpcd_data;
11112         bool capable = false;
11113
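              /*
               * DP_MSA_TIMING_PAR_IGNORED is a bit in the DOWN_STREAM_PORT_COUNT
               * DPCD register (0x007). When set, the sink ignores the MSA timing
               * parameters and can follow a variable refresh rate.
               */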
11114         if (amdgpu_dm_connector->dc_link &&
11115                 dm_helpers_dp_read_dpcd(
11116                                 NULL,
11117                                 amdgpu_dm_connector->dc_link,
11118                                 DP_DOWN_STREAM_PORT_COUNT,
11119                                 &dpcd_data,
11120                                 sizeof(dpcd_data))) {
11121                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11122         }
11123
11124         return capable;
11125 }
11126
11127 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11128                 unsigned int offset,
11129                 unsigned int total_length,
11130                 uint8_t *data,
11131                 unsigned int length,
11132                 struct amdgpu_hdmi_vsdb_info *vsdb)
11133 {
11134         bool res;
11135         union dmub_rb_cmd cmd;
11136         struct dmub_cmd_send_edid_cea *input;
11137         struct dmub_cmd_edid_cea_output *output;
11138
11139         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11140                 return false;
11141
11142         memset(&cmd, 0, sizeof(cmd));
11143
11144         input = &cmd.edid_cea.data.input;
11145
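              /* Build the DMUB EDID-CEA command; payload_bytes counts only the
               * data that follows the command header. */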
11146         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11147         cmd.edid_cea.header.sub_type = 0;
11148         cmd.edid_cea.header.payload_bytes =
11149                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11150         input->offset = offset;
11151         input->length = length;
11152         input->total_length = total_length;
11153         memcpy(input->payload, data, length);
11154
11155         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11156         if (!res) {
11157                 DRM_ERROR("EDID CEA parser failed\n");
11158                 return false;
11159         }
11160
11161         output = &cmd.edid_cea.data.output;
11162
11163         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11164                 if (!output->ack.success) {
11165                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11166                                         output->ack.offset);
11167                 }
11168         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11169                 if (!output->amd_vsdb.vsdb_found)
11170                         return false;
11171
11172                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11173                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11174                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11175                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11176         } else {
11177                 DRM_WARN("Unknown EDID CEA parser results\n");
11178                 return false;
11179         }
11180
11181         return true;
11182 }
11183
11184 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11185                 uint8_t *edid_ext, int len,
11186                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11187 {
11188         int i;
11189
11190         /* send extension block to DMCU for parsing */
11191         for (i = 0; i < len; i += 8) {
11192                 bool res;
11193                 int offset;
11194
11195                 /* send 8 bytes at a time */
11196                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11197                         return false;
11198
11199                 if (i + 8 == len) {
11200                         /* EDID block fully sent; expect the parse result */
11201                         int version, min_rate, max_rate;
11202
11203                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11204                         if (res) {
11205                                 /* amd vsdb found */
11206                                 vsdb_info->freesync_supported = 1;
11207                                 vsdb_info->amd_vsdb_version = version;
11208                                 vsdb_info->min_refresh_rate_hz = min_rate;
11209                                 vsdb_info->max_refresh_rate_hz = max_rate;
11210                                 return true;
11211                         }
11212                         /* not amd vsdb */
11213                         return false;
11214                 }
11215
11216                 /* check for ack */
11217                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11218                 if (!res)
11219                         return false;
11220         }
11221
11222         return false;
11223 }
11224
11225 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11226                 uint8_t *edid_ext, int len,
11227                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11228 {
11229         int i;
11230
11231         /* send extension block to DMUB for parsing */
11232         for (i = 0; i < len; i += 8) {
11233                 /* send 8 bytes at a time */
11234                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11235                         return false;
11236         }
11237
11238         return vsdb_info->freesync_supported;
11239 }
11240
11241 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11242                 uint8_t *edid_ext, int len,
11243                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11244 {
11245         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11246
11247         if (adev->dm.dmub_srv)
11248                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11249         else
11250                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11251 }
11252
11253 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11254                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11255 {
11256         uint8_t *edid_ext = NULL;
11257         int i;
11258         bool valid_vsdb_found = false;
11259
11260         /*----- drm_find_cea_extension() -----*/
11261         /* No EDID or EDID extensions */
11262         if (edid == NULL || edid->extensions == 0)
11263                 return -ENODEV;
11264
11265         /* Find the CEA extension; each extension block is EDID_LENGTH (128) bytes */
11266         for (i = 0; i < edid->extensions; i++) {
11267                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11268                 if (edid_ext[0] == CEA_EXT)
11269                         break;
11270         }
11271
11272         if (i == edid->extensions)
11273                 return -ENODEV;
11274
11275         /*----- cea_db_offsets() -----*/
11276         if (edid_ext[0] != CEA_EXT)
11277                 return -ENODEV;
11278
11279         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11280
11281         return valid_vsdb_found ? i : -ENODEV;
11282 }
11283
11284 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11285                                         struct edid *edid)
11286 {
11287         int i = 0;
11288         struct detailed_timing *timing;
11289         struct detailed_non_pixel *data;
11290         struct detailed_data_monitor_range *range;
11291         struct amdgpu_dm_connector *amdgpu_dm_connector =
11292                         to_amdgpu_dm_connector(connector);
11293         struct dm_connector_state *dm_con_state = NULL;
11294         struct dc_sink *sink;
11295
11296         struct drm_device *dev = connector->dev;
11297         struct amdgpu_device *adev = drm_to_adev(dev);
11298         bool freesync_capable = false;
11299         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11300
11301         if (!connector->state) {
11302                 DRM_ERROR("%s - Connector has no state\n", __func__);
11303                 goto update;
11304         }
11305
11306         sink = amdgpu_dm_connector->dc_sink ?
11307                 amdgpu_dm_connector->dc_sink :
11308                 amdgpu_dm_connector->dc_em_sink;
11309
11310         if (!edid || !sink) {
11311                 dm_con_state = to_dm_connector_state(connector->state);
11312
11313                 amdgpu_dm_connector->min_vfreq = 0;
11314                 amdgpu_dm_connector->max_vfreq = 0;
11315                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11316                 connector->display_info.monitor_range.min_vfreq = 0;
11317                 connector->display_info.monitor_range.max_vfreq = 0;
11318                 freesync_capable = false;
11319
11320                 goto update;
11321         }
11322
11323         dm_con_state = to_dm_connector_state(connector->state);
11324
11325         if (!adev->dm.freesync_module)
11326                 goto update;
11327
11328
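              /*
               * For DP/eDP sinks the FreeSync range comes from the EDID
               * range-limits descriptor (only when the sink ignores MSA timing);
               * for HDMI sinks it comes from the AMD vendor-specific data block.
               */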
11329         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11330                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11331                 bool edid_check_required = false;
11332
11333                 if (edid) {
11334                         edid_check_required = is_dp_capable_without_timing_msa(
11335                                                 adev->dm.dc,
11336                                                 amdgpu_dm_connector);
11337                 }
11338
11339                 if (edid_check_required && (edid->version > 1 ||
11340                    (edid->version == 1 && edid->revision > 1))) {
11341                         for (i = 0; i < 4; i++) {
11342
11343                                 timing  = &edid->detailed_timings[i];
11344                                 data    = &timing->data.other_data;
11345                                 range   = &data->data.range;
11346                                 /*
11347                                  * Check if monitor has continuous frequency mode
11348                                  */
11349                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11350                                         continue;
11351                                 /*
11352                                  * Check for flag range limits only. If flag == 1 then
11353                                  * no additional timing information provided.
11354                                  * Default GTF, GTF Secondary curve and CVT are not
11355                                  * supported
11356                                  */
11357                                 if (range->flags != 1)
11358                                         continue;
11359
11360                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11361                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
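                                      /* The EDID range descriptor stores the
                                       * max pixel clock in units of 10 MHz. */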
11362                                 amdgpu_dm_connector->pixel_clock_mhz =
11363                                         range->pixel_clock_mhz * 10;
11364
11365                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11366                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11367
11368                                 break;
11369                         }
11370
11371                         if (amdgpu_dm_connector->max_vfreq -
11372                             amdgpu_dm_connector->min_vfreq > 10) {
11373
11374                                 freesync_capable = true;
11375                         }
11376                 }
11377         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11378                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11379                 if (i >= 0 && vsdb_info.freesync_supported) {
11380                         timing  = &edid->detailed_timings[i];
11381                         data    = &timing->data.other_data;
11382
11383                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11384                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11385                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11386                                 freesync_capable = true;
11387
11388                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11389                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11390                 }
11391         }
11392
11393 update:
11394         if (dm_con_state)
11395                 dm_con_state->freesync_capable = freesync_capable;
11396
11397         if (connector->vrr_capable_property)
11398                 drm_connector_set_vrr_capable_property(connector,
11399                                                        freesync_capable);
11400 }
11401
11402 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11403 {
11404         struct amdgpu_device *adev = drm_to_adev(dev);
11405         struct dc *dc = adev->dm.dc;
11406         int i;
11407
11408         mutex_lock(&adev->dm.dc_lock);
11409         if (dc->current_state) {
11410                 for (i = 0; i < dc->current_state->stream_count; ++i)
11411                         dc->current_state->streams[i]
11412                                 ->triggered_crtc_reset.enabled =
11413                                 adev->dm.force_timing_sync;
11414
11415                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11416                 dc_trigger_sync(dc, dc->current_state);
11417         }
11418         mutex_unlock(&adev->dm.dc_lock);
11419 }
11420
11421 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11422                        uint32_t value, const char *func_name)
11423 {
11424 #ifdef DM_CHECK_ADDR_0
11425         if (address == 0) {
11426                 DC_ERR("invalid register write. address = 0");
11427                 return;
11428         }
11429 #endif
11430         cgs_write_register(ctx->cgs_device, address, value);
11431         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11432 }
11433
11434 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11435                           const char *func_name)
11436 {
11437         uint32_t value;
11438 #ifdef DM_CHECK_ADDR_0
11439         if (address == 0) {
11440                 DC_ERR("invalid register read; address = 0\n");
11441                 return 0;
11442         }
11443 #endif
11444
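              /*
               * Reads cannot be serviced while DMUB register-access offload is
               * gathering writes (unless burst writes are in use), so flag the
               * attempt and return 0.
               */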
11445         if (ctx->dmub_srv &&
11446             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11447             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11448                 ASSERT(false);
11449                 return 0;
11450         }
11451
11452         value = cgs_read_register(ctx->cgs_device, address);
11453
11454         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11455
11456         return value;
11457 }
11458
11459 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11460         uint8_t status_type, uint32_t *operation_result)
11461 {
11462         struct amdgpu_device *adev = ctx->driver_context;
11463         int return_status = -1;
11464         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11465
11466         if (is_cmd_aux) {
11467                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11468                         return_status = p_notify->aux_reply.length;
11469                         *operation_result = p_notify->result;
11470                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11471                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11472                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11473                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11474                 } else {
11475                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11476                 }
11477         } else {
11478                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11479                         return_status = 0;
11480                         *operation_result = p_notify->sc_status;
11481                 } else {
11482                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11483                 }
11484         }
11485
11486         return return_status;
11487 }
11488
11489 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11490         unsigned int link_index, void *cmd_payload, void *operation_result)
11491 {
11492         struct amdgpu_device *adev = ctx->driver_context;
11493         int ret = 0;
11494
11495         if (is_cmd_aux) {
11496                 dc_process_dmub_aux_transfer_async(ctx->dc,
11497                         link_index, (struct aux_payload *)cmd_payload);
11498         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11499                                         (struct set_config_cmd_payload *)cmd_payload,
11500                                         adev->dm.dmub_notify)) {
11501                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11502                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11503                                         (uint32_t *)operation_result);
11504         }
11505
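              /* Wait up to 10 seconds (10 * HZ jiffies) for DMUB to signal that
               * the transfer finished. */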
11506         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11507         if (ret == 0) {
11508                 DRM_ERROR("wait_for_completion_timeout timed out!\n");
11509                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11510                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11511                                 (uint32_t *)operation_result);
11512         }
11513
11514         if (is_cmd_aux) {
11515                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11516                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11517
11518                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11519                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11520                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11521                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11522                                        adev->dm.dmub_notify->aux_reply.length);
11523                         }
11524                 }
11525         }
11526
11527         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11528                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11529                         (uint32_t *)operation_result);
11530 }