/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector);

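/* Map the dongle type reported in the link's DPCD caps to a DRM subconnector type. */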
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

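/*
 * Refresh the DP subconnector property on a DisplayPort connector to reflect
 * the dongle type of the currently attached sink (or Unknown when no sink is
 * present).
 */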
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the counter of vertical blanks for a CRTC
 * @adev: [in] desired amdgpu device
 * @crtc: [in] which CRTC to get the counter from
 *
 * Return: Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        struct amdgpu_crtc *acrtc;

        if (crtc >= adev->mode_info.num_crtc)
                return 0;

        acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                          crtc);
                return 0;
        }

        return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

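/*
 * Query the current scanout position from DC and pack it back into the
 * register-style format expected by the base driver: the horizontal position
 * in the high 16 bits of *position, vblank start/end packed into *vbl.
 */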
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;
        struct amdgpu_crtc *acrtc;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                          crtc);
                return 0;
        }

        /*
         * TODO rework base driver to use values directly.
         * for now parse it back into reg-format
         */
        dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                 &v_blank_start,
                                 &v_blank_end,
                                 &h_position,
                                 &v_position);

        *position = v_position | (h_position << 16);
        *vbl = v_blank_start | (v_blank_end << 16);

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

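/*
 * Look up the amdgpu_crtc driving the given OTG instance. Falls back to the
 * first CRTC (with a warning) when the instance is -1.
 */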
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

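/* VRR is considered active in both the variable and the fixed refresh state. */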
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

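/*
 * A DC timing adjustment is needed when entering fixed refresh or whenever
 * the VRR active state toggles between the old and new CRTC state.
 */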
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the pageflip IRQ parameters
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at the start of this
         * vblank of pageflip completion, so last_flip_vblank is the forbidden
         * count for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

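/*
 * Forward the vblank to DRM core and, under the event lock, complete any
 * pending event that is not tied to an in-flight pageflip (e.g. a
 * cursor-only commit).
 */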
static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
        struct drm_crtc *crtc = &acrtc->base;
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        drm_crtc_handle_vblank(crtc);

        spin_lock_irqsave(&dev->event_lock, flags);

        /* Send completion event for cursor-only commits */
        if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                drm_crtc_send_vblank_event(crtc, acrtc->event);
                drm_crtc_vblank_put(crtc);
                acrtc->event = NULL;
        }

        spin_unlock_irqrestore(&dev->event_lock, flags);
}

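/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Tracks the measured refresh rate and, in VRR mode, performs the core
 * vblank handling after the end of the front-porch, including BTR
 * processing on pre-DCE12 ASICs.
 */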
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping only gives valid results
                 * after the front-porch. This will also deliver page-flip
                 * completion events that have been queued to us if a pageflip
                 * happened inside front-porch.
                 */
                if (vrr_active) {
                        dm_crtc_handle_vblank(acrtc);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                dm_crtc_handle_vblank(acrtc);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCN HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls a helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        uint8_t link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        /* Valid link indices are 0 .. link_count - 1. */
        if (notify->link_index >= adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets the indicator of whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false otherwise
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
                adev->dm.dmub_callback[type] = callback;
                adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
        } else {
                return false;
        }

        return true;
}

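/*
 * Worker that runs an offloaded DMUB notification callback outside of
 * interrupt context, then frees the queued work item.
 */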
static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                kfree(dmub_hpd_wrk);
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                                                                                      dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching DMUB notifications and
 * draining the DMUB trace log buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
                irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type]) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else {
                        break;
                }

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

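/**
 * dm_dmub_hw_init() - Initialize DMCUB hardware
 * @adev: amdgpu_device pointer
 *
 * Copies the DMUB firmware and VBIOS into their reserved framebuffer
 * windows, clears the mailbox, trace buffer and firmware state regions,
 * and brings up the DMCUB hardware through the DMUB service.
 *
 * Return: 0 on success (including when DMUB is not supported on the ASIC),
 * negative error code on failure.
 */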
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
                hw_params.dpia_supported = true;
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

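/*
 * Restore DMCUB after suspend: if the hardware is still initialized, only
 * wait for the firmware auto-load to finish; otherwise redo the full
 * hardware init.
 */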
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}

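/*
 * Translate the MMHUB memory layout (system aperture, AGP aperture and GART
 * page table addresses) into the physical address space configuration
 * consumed by DC.
 */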
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increases the system aperture high address
                 * (add 1) to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}

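/*
 * Worker that tracks the number of CRTCs with vblank interrupts enabled,
 * gates idle optimizations (MALL) on that count, and enables or disables
 * PSR according to the vblank requirements of the OS.
 */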
static void vblank_control_worker(struct work_struct *work)
{
        struct vblank_control_work *vblank_work =
                container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        /*
         * Control PSR based on vblank requirements from OS
         *
         * If panel supports PSR SU, there's no need to disable PSR when OS is
         * submitting fast atomic commits (we infer this by whether the OS
         * requests vblank events). Fast atomic commits will simply trigger a
         * full-frame-update (FFU); a specific case of selective-update (SU)
         * where the SU region is the full hactive*vactive region. See
         * fill_dc_dirty_rects().
         */
        if (vblank_work->stream && vblank_work->stream->link) {
                if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
                            vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
        }

        mutex_unlock(&dm->dc_lock);

        dc_stream_release(vblank_work->stream);

        kfree(vblank_work);
}

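/*
 * Deferred handler for HPD RX interrupts that need work outside of interrupt
 * context: re-detects the sink and services automated-test and link-loss
 * requests under the DC lock.
 */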
1322 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1323 {
1324         struct hpd_rx_irq_offload_work *offload_work;
1325         struct amdgpu_dm_connector *aconnector;
1326         struct dc_link *dc_link;
1327         struct amdgpu_device *adev;
1328         enum dc_connection_type new_connection_type = dc_connection_none;
1329         unsigned long flags;
1330
1331         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1332         aconnector = offload_work->offload_wq->aconnector;
1333
1334         if (!aconnector) {
1335                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1336                 goto skip;
1337         }
1338
1339         adev = drm_to_adev(aconnector->base.dev);
1340         dc_link = aconnector->dc_link;
1341
1342         mutex_lock(&aconnector->hpd_lock);
1343         if (!dc_link_detect_sink(dc_link, &new_connection_type))
1344                 DRM_ERROR("KMS: Failed to detect connector\n");
1345         mutex_unlock(&aconnector->hpd_lock);
1346
1347         if (new_connection_type == dc_connection_none)
1348                 goto skip;
1349
1350         if (amdgpu_in_reset(adev))
1351                 goto skip;
1352
1353         mutex_lock(&adev->dm.dc_lock);
1354         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1355                 dc_link_dp_handle_automated_test(dc_link);
1356         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1357                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1358                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1359                 dc_link_dp_handle_link_loss(dc_link);
1360                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1361                 offload_work->offload_wq->is_handling_link_loss = false;
1362                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1363         }
1364         mutex_unlock(&adev->dm.dc_lock);
1365
1366 skip:
1367         kfree(offload_work);
1369 }
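
/*
 * Illustrative, hedged sketch: how a short-pulse HPD RX interrupt is deferred
 * onto the per-link offload queue created below. The helper name
 * example_schedule_hpd_rx_offload is hypothetical; union hpd_irq_data comes
 * from dc, and the allocated work item is freed by the handler above.
 */
static void example_schedule_hpd_rx_offload(struct hpd_rx_irq_offload_work_queue *offload_wq,
					    union hpd_irq_data hpd_irq_data)
{
	struct hpd_rx_irq_offload_work *offload_work;

	offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
	if (!offload_work) {
		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
		return;
	}

	/* dm_handle_hpd_rx_offload_work() kfrees offload_work when done. */
	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
	offload_work->data = hpd_irq_data;
	offload_work->offload_wq = offload_wq;

	queue_work(offload_wq->wq, &offload_work->work);
}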
1370
1371 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1372 {
1373         int max_caps = dc->caps.max_links;
1374         int i = 0;
1375         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1376
1377         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1378
1379         if (!hpd_rx_offload_wq)
1380                 return NULL;
1381
1383         for (i = 0; i < max_caps; i++) {
1384                 hpd_rx_offload_wq[i].wq =
1385                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1386
1387                 if (hpd_rx_offload_wq[i].wq == NULL) {
1388                         DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!\n");
1389                         goto out_free;
1390                 }
1391
1392                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1393         }
1394
1395         return hpd_rx_offload_wq;

out_free:
        /* Unwind any workqueues created before the failure so none are leaked. */
        while (--i >= 0)
                destroy_workqueue(hpd_rx_offload_wq[i].wq);
        kfree(hpd_rx_offload_wq);
        return NULL;
1396 }
1397
1398 struct amdgpu_stutter_quirk {
1399         u16 chip_vendor;
1400         u16 chip_device;
1401         u16 subsys_vendor;
1402         u16 subsys_device;
1403         u8 revision;
1404 };
1405
1406 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1407         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1408         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
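        /* Zero-filled terminator; dm_should_disable_stutter() stops at chip_device == 0. */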
1409         { 0, 0, 0, 0, 0 },
1410 };
1411
1412 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1413 {
1414         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1415
1416         while (p && p->chip_device != 0) {
1417                 if (pdev->vendor == p->chip_vendor &&
1418                     pdev->device == p->chip_device &&
1419                     pdev->subsystem_vendor == p->subsys_vendor &&
1420                     pdev->subsystem_device == p->subsys_device &&
1421                     pdev->revision == p->revision) {
1422                         return true;
1423                 }
1424                 ++p;
1425         }
1426         return false;
1427 }
1428
1429 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1430         {
1431                 .matches = {
1432                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1433                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1434                 },
1435         },
1436         {
1437                 .matches = {
1438                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1439                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1440                 },
1441         },
1442         {
1443                 .matches = {
1444                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1445                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1446                 },
1447         },
1448         {}
1449 };
1450
1451 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1452 {
1453         const struct dmi_system_id *dmi_id;
1454
1455         dm->aux_hpd_discon_quirk = false;
1456
1457         dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1458         if (dmi_id) {
1459                 dm->aux_hpd_discon_quirk = true;
1460                 DRM_INFO("aux_hpd_discon_quirk attached\n");
1461         }
1462 }
1463
1464 static int amdgpu_dm_init(struct amdgpu_device *adev)
1465 {
1466         struct dc_init_data init_data;
1467 #ifdef CONFIG_DRM_AMD_DC_HDCP
1468         struct dc_callback_init init_params;
1469 #endif
1470         int r;
1471
1472         adev->dm.ddev = adev_to_drm(adev);
1473         adev->dm.adev = adev;
1474
1475         /* Zero all the fields */
1476         memset(&init_data, 0, sizeof(init_data));
1477 #ifdef CONFIG_DRM_AMD_DC_HDCP
1478         memset(&init_params, 0, sizeof(init_params));
1479 #endif
1480
1481         mutex_init(&adev->dm.dc_lock);
1482         mutex_init(&adev->dm.audio_lock);
1483         spin_lock_init(&adev->dm.vblank_lock);
1484
1485         if (amdgpu_dm_irq_init(adev)) {
1486                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1487                 goto error;
1488         }
1489
1490         init_data.asic_id.chip_family = adev->family;
1491
1492         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1493         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1494         init_data.asic_id.chip_id = adev->pdev->device;
1495
1496         init_data.asic_id.vram_width = adev->gmc.vram_width;
1497         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1498         init_data.asic_id.atombios_base_address =
1499                 adev->mode_info.atom_context->bios;
1500
1501         init_data.driver = adev;
1502
1503         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1504
1505         if (!adev->dm.cgs_device) {
1506                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1507                 goto error;
1508         }
1509
1510         init_data.cgs_device = adev->dm.cgs_device;
1511
1512         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1513
1514         switch (adev->ip_versions[DCE_HWIP][0]) {
1515         case IP_VERSION(2, 1, 0):
1516                 switch (adev->dm.dmcub_fw_version) {
1517                 case 0: /* development */
1518                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1519                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1520                         init_data.flags.disable_dmcu = false;
1521                         break;
1522                 default:
1523                         init_data.flags.disable_dmcu = true;
1524                 }
1525                 break;
1526         case IP_VERSION(2, 0, 3):
1527                 init_data.flags.disable_dmcu = true;
1528                 break;
1529         default:
1530                 break;
1531         }
1532
1533         switch (adev->asic_type) {
1534         case CHIP_CARRIZO:
1535         case CHIP_STONEY:
1536                 init_data.flags.gpu_vm_support = true;
1537                 break;
1538         default:
1539                 switch (adev->ip_versions[DCE_HWIP][0]) {
1540                 case IP_VERSION(1, 0, 0):
1541                 case IP_VERSION(1, 0, 1):
1542                         /* enable S/G on PCO and RV2 */
1543                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1544                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1545                                 init_data.flags.gpu_vm_support = true;
1546                         break;
1547                 case IP_VERSION(2, 1, 0):
1548                 case IP_VERSION(3, 0, 1):
1549                 case IP_VERSION(3, 1, 2):
1550                 case IP_VERSION(3, 1, 3):
1551                 case IP_VERSION(3, 1, 5):
1552                 case IP_VERSION(3, 1, 6):
1553                         init_data.flags.gpu_vm_support = true;
1554                         break;
1555                 default:
1556                         break;
1557                 }
1558                 break;
1559         }
1560
1561         if (init_data.flags.gpu_vm_support)
1562                 adev->mode_info.gpu_vm_support = true;
1563
1564         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1565                 init_data.flags.fbc_support = true;
1566
1567         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1568                 init_data.flags.multi_mon_pp_mclk_switch = true;
1569
1570         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1571                 init_data.flags.disable_fractional_pwm = true;
1572
1573         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1574                 init_data.flags.edp_no_power_sequencing = true;
1575
1576         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1577                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1578         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1579                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1580
1581         init_data.flags.seamless_boot_edp_requested = false;
1582
1583         if (check_seamless_boot_capability(adev)) {
1584                 init_data.flags.seamless_boot_edp_requested = true;
1585                 init_data.flags.allow_seamless_boot_optimization = true;
1586                 DRM_INFO("Seamless boot condition check passed\n");
1587         }
1588
1589         init_data.flags.enable_mipi_converter_optimization = true;
1590
1591         init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1592         init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1593
1594         INIT_LIST_HEAD(&adev->dm.da_list);
1595
1596         retrieve_dmi_info(&adev->dm);
1597
1598         /* Display Core create. */
1599         adev->dm.dc = dc_create(&init_data);
1600
1601         if (adev->dm.dc) {
1602                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1603         } else {
1604                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1605                 goto error;
1606         }
1607
1608         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1609                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1610                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1611         }
1612
1613         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1614                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1615         if (dm_should_disable_stutter(adev->pdev))
1616                 adev->dm.dc->debug.disable_stutter = true;
1617
1618         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1619                 adev->dm.dc->debug.disable_stutter = true;
1620
1621         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1622                 adev->dm.dc->debug.disable_dsc = true;
1623                 adev->dm.dc->debug.disable_dsc_edp = true;
1624         }
1625
1626         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1627                 adev->dm.dc->debug.disable_clock_gate = true;
1628
1629         if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1630                 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1631
1632         r = dm_dmub_hw_init(adev);
1633         if (r) {
1634                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1635                 goto error;
1636         }
1637
1638         dc_hardware_init(adev->dm.dc);
1639
1640         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1641         if (!adev->dm.hpd_rx_offload_wq) {
1642                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1643                 goto error;
1644         }
1645
1646         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1647                 struct dc_phy_addr_space_config pa_config;
1648
1649                 mmhub_read_system_context(adev, &pa_config);
1650
1651                 /* Call the DC init_memory func */
1652                 dc_setup_system_context(adev->dm.dc, &pa_config);
1653         }
1654
1655         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1656         if (!adev->dm.freesync_module) {
1657                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1658         } else {
1659                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1660                                 adev->dm.freesync_module);
1661         }
1662
1663         amdgpu_dm_init_color_mod();
1664
1665         if (adev->dm.dc->caps.max_links > 0) {
1666                 adev->dm.vblank_control_workqueue =
1667                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1668                 if (!adev->dm.vblank_control_workqueue)
1669                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1670         }
1671
1672 #ifdef CONFIG_DRM_AMD_DC_HDCP
1673         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1674                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1675
1676                 if (!adev->dm.hdcp_workqueue)
1677                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1678                 else
1679                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1680
1681                 dc_init_callbacks(adev->dm.dc, &init_params);
1682         }
1683 #endif
1684 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1685         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1686 #endif
1687         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1688                 init_completion(&adev->dm.dmub_aux_transfer_done);
1689                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1690                 if (!adev->dm.dmub_notify) {
1691                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1692                         goto error;
1693                 }
1694
1695                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1696                 if (!adev->dm.delayed_hpd_wq) {
1697                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1698                         goto error;
1699                 }
1700
1701                 amdgpu_dm_outbox_init(adev);
1702                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1703                         dmub_aux_setconfig_callback, false)) {
1704                         DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1705                         goto error;
1706                 }
1707                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1708                         DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1709                         goto error;
1710                 }
1711                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1712                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1713                         goto error;
1714                 }
1715         }
1716
1717         if (amdgpu_dm_initialize_drm_device(adev)) {
1718                 DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1720                 goto error;
1721         }
1722
1723         /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1724          * It is expected that DMUB will resend any pending notifications at this point, for
1725          * example HPD from DPIA.
1726          */
1727         if (dc_is_dmub_outbox_supported(adev->dm.dc))
1728                 dc_enable_dmub_outbox(adev->dm.dc);
1729
1730         /* create fake encoders for MST */
1731         dm_dp_create_fake_mst_encoders(adev);
1732
1733         /* TODO: Add_display_info? */
1734
1735         /* TODO use dynamic cursor width */
1736         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1737         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1738
1739         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1740                 DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1742                 goto error;
1743         }
1744
1746         DRM_DEBUG_DRIVER("KMS initialized.\n");
1747
1748         return 0;
1749 error:
1750         amdgpu_dm_fini(adev);
1751
1752         return -EINVAL;
1753 }
1754
1755 static int amdgpu_dm_early_fini(void *handle)
1756 {
1757         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1758
1759         amdgpu_dm_audio_fini(adev);
1760
1761         return 0;
1762 }
1763
1764 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1765 {
1766         int i;
1767
1768         if (adev->dm.vblank_control_workqueue) {
1769                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1770                 adev->dm.vblank_control_workqueue = NULL;
1771         }
1772
1773         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1774                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1775         }
1776
1777         amdgpu_dm_destroy_drm_device(&adev->dm);
1778
1779 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1780         if (adev->dm.crc_rd_wrk) {
1781                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1782                 kfree(adev->dm.crc_rd_wrk);
1783                 adev->dm.crc_rd_wrk = NULL;
1784         }
1785 #endif
1786 #ifdef CONFIG_DRM_AMD_DC_HDCP
1787         if (adev->dm.hdcp_workqueue) {
1788                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1789                 adev->dm.hdcp_workqueue = NULL;
1790         }
1791
1792         if (adev->dm.dc)
1793                 dc_deinit_callbacks(adev->dm.dc);
1794 #endif
1795
1796         dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1797
1798         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1799                 kfree(adev->dm.dmub_notify);
1800                 adev->dm.dmub_notify = NULL;
1801                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1802                 adev->dm.delayed_hpd_wq = NULL;
1803         }
1804
1805         if (adev->dm.dmub_bo)
1806                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1807                                       &adev->dm.dmub_bo_gpu_addr,
1808                                       &adev->dm.dmub_bo_cpu_addr);
1809
1810         if (adev->dm.hpd_rx_offload_wq) {
1811                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1812                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1813                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1814                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1815                         }
1816                 }
1817
1818                 kfree(adev->dm.hpd_rx_offload_wq);
1819                 adev->dm.hpd_rx_offload_wq = NULL;
1820         }
1821
1822         /* DC Destroy TODO: Replace destroy DAL */
1823         if (adev->dm.dc)
1824                 dc_destroy(&adev->dm.dc);
1825         /*
1826          * TODO: pageflip, vlank interrupt
1827          *
1828          * amdgpu_dm_irq_fini(adev);
1829          */
1830
1831         if (adev->dm.cgs_device) {
1832                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1833                 adev->dm.cgs_device = NULL;
1834         }
1835         if (adev->dm.freesync_module) {
1836                 mod_freesync_destroy(adev->dm.freesync_module);
1837                 adev->dm.freesync_module = NULL;
1838         }
1839
1840         mutex_destroy(&adev->dm.audio_lock);
1841         mutex_destroy(&adev->dm.dc_lock);
1844 }
1845
1846 static int load_dmcu_fw(struct amdgpu_device *adev)
1847 {
1848         const char *fw_name_dmcu = NULL;
1849         int r;
1850         const struct dmcu_firmware_header_v1_0 *hdr;
1851
1852         switch (adev->asic_type) {
1853 #if defined(CONFIG_DRM_AMD_DC_SI)
1854         case CHIP_TAHITI:
1855         case CHIP_PITCAIRN:
1856         case CHIP_VERDE:
1857         case CHIP_OLAND:
1858 #endif
1859         case CHIP_BONAIRE:
1860         case CHIP_HAWAII:
1861         case CHIP_KAVERI:
1862         case CHIP_KABINI:
1863         case CHIP_MULLINS:
1864         case CHIP_TONGA:
1865         case CHIP_FIJI:
1866         case CHIP_CARRIZO:
1867         case CHIP_STONEY:
1868         case CHIP_POLARIS11:
1869         case CHIP_POLARIS10:
1870         case CHIP_POLARIS12:
1871         case CHIP_VEGAM:
1872         case CHIP_VEGA10:
1873         case CHIP_VEGA12:
1874         case CHIP_VEGA20:
1875                 return 0;
1876         case CHIP_NAVI12:
1877                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1878                 break;
1879         case CHIP_RAVEN:
1880                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1881                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1882                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1883                 else
1884                         return 0;
1886                 break;
1887         default:
1888                 switch (adev->ip_versions[DCE_HWIP][0]) {
1889                 case IP_VERSION(2, 0, 2):
1890                 case IP_VERSION(2, 0, 3):
1891                 case IP_VERSION(2, 0, 0):
1892                 case IP_VERSION(2, 1, 0):
1893                 case IP_VERSION(3, 0, 0):
1894                 case IP_VERSION(3, 0, 2):
1895                 case IP_VERSION(3, 0, 3):
1896                 case IP_VERSION(3, 0, 1):
1897                 case IP_VERSION(3, 1, 2):
1898                 case IP_VERSION(3, 1, 3):
1899                 case IP_VERSION(3, 1, 5):
1900                 case IP_VERSION(3, 1, 6):
1901                 case IP_VERSION(3, 2, 0):
1902                 case IP_VERSION(3, 2, 1):
1903                         return 0;
1904                 default:
1905                         break;
1906                 }
1907                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1908                 return -EINVAL;
1909         }
1910
1911         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1912                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1913                 return 0;
1914         }
1915
1916         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1917         if (r == -ENOENT) {
1918                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1919                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1920                 adev->dm.fw_dmcu = NULL;
1921                 return 0;
1922         }
1923         if (r) {
1924                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1925                         fw_name_dmcu);
1926                 return r;
1927         }
1928
1929         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1930         if (r) {
1931                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1932                         fw_name_dmcu);
1933                 release_firmware(adev->dm.fw_dmcu);
1934                 adev->dm.fw_dmcu = NULL;
1935                 return r;
1936         }
1937
1938         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
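        /* Register both DMCU regions, ERAM and the interrupt vectors (INTV), with the PSP loader. */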
1939         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1940         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1941         adev->firmware.fw_size +=
1942                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1943
1944         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1945         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1946         adev->firmware.fw_size +=
1947                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1948
1949         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1950
1951         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1952
1953         return 0;
1954 }
1955
1956 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1957 {
1958         struct amdgpu_device *adev = ctx;
1959
1960         return dm_read_reg(adev->dm.dc->ctx, address);
1961 }
1962
1963 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1964                                      uint32_t value)
1965 {
1966         struct amdgpu_device *adev = ctx;
1967
1968         dm_write_reg(adev->dm.dc->ctx, address, value);
1969 }
1970
1971 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1972 {
1973         struct dmub_srv_create_params create_params;
1974         struct dmub_srv_region_params region_params;
1975         struct dmub_srv_region_info region_info;
1976         struct dmub_srv_fb_params fb_params;
1977         struct dmub_srv_fb_info *fb_info;
1978         struct dmub_srv *dmub_srv;
1979         const struct dmcub_firmware_header_v1_0 *hdr;
1980         const char *fw_name_dmub;
1981         enum dmub_asic dmub_asic;
1982         enum dmub_status status;
1983         int r;
1984
1985         switch (adev->ip_versions[DCE_HWIP][0]) {
1986         case IP_VERSION(2, 1, 0):
1987                 dmub_asic = DMUB_ASIC_DCN21;
1988                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1989                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1990                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1991                 break;
1992         case IP_VERSION(3, 0, 0):
1993                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1994                         dmub_asic = DMUB_ASIC_DCN30;
1995                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1996                 } else {
1997                         dmub_asic = DMUB_ASIC_DCN30;
1998                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1999                 }
2000                 break;
2001         case IP_VERSION(3, 0, 1):
2002                 dmub_asic = DMUB_ASIC_DCN301;
2003                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
2004                 break;
2005         case IP_VERSION(3, 0, 2):
2006                 dmub_asic = DMUB_ASIC_DCN302;
2007                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
2008                 break;
2009         case IP_VERSION(3, 0, 3):
2010                 dmub_asic = DMUB_ASIC_DCN303;
2011                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
2012                 break;
2013         case IP_VERSION(3, 1, 2):
2014         case IP_VERSION(3, 1, 3):
2015                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2016                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
2017                 break;
2018         case IP_VERSION(3, 1, 4):
2019                 dmub_asic = DMUB_ASIC_DCN314;
2020                 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
2021                 break;
2022         case IP_VERSION(3, 1, 5):
2023                 dmub_asic = DMUB_ASIC_DCN315;
2024                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
2025                 break;
2026         case IP_VERSION(3, 1, 6):
2027                 dmub_asic = DMUB_ASIC_DCN316;
2028                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
2029                 break;
2030         case IP_VERSION(3, 2, 0):
2031                 dmub_asic = DMUB_ASIC_DCN32;
2032                 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
2033                 break;
2034         case IP_VERSION(3, 2, 1):
2035                 dmub_asic = DMUB_ASIC_DCN321;
2036                 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
2037                 break;
2038         default:
2039                 /* ASIC doesn't support DMUB. */
2040                 return 0;
2041         }
2042
2043         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
2044         if (r) {
2045                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2046                 return 0;
2047         }
2048
2049         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2050         if (r) {
2051                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2052                 return 0;
2053         }
2054
2055         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2056         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2057
2058         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2059                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2060                         AMDGPU_UCODE_ID_DMCUB;
2061                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2062                         adev->dm.dmub_fw;
2063                 adev->firmware.fw_size +=
2064                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2065
2066                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2067                          adev->dm.dmcub_fw_version);
2068         }
2069
2071         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2072         dmub_srv = adev->dm.dmub_srv;
2073
2074         if (!dmub_srv) {
2075                 DRM_ERROR("Failed to allocate DMUB service!\n");
2076                 return -ENOMEM;
2077         }
2078
2079         memset(&create_params, 0, sizeof(create_params));
2080         create_params.user_ctx = adev;
2081         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2082         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2083         create_params.asic = dmub_asic;
2084
2085         /* Create the DMUB service. */
2086         status = dmub_srv_create(dmub_srv, &create_params);
2087         if (status != DMUB_STATUS_OK) {
2088                 DRM_ERROR("Error creating DMUB service: %d\n", status);
2089                 return -EINVAL;
2090         }
2091
2092         /* Calculate the size of all the regions for the DMUB service. */
2093         memset(&region_params, 0, sizeof(region_params));
2094
2095         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2096                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2097         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2098         region_params.vbios_size = adev->bios_size;
2099         region_params.fw_bss_data = region_params.bss_data_size ?
2100                 adev->dm.dmub_fw->data +
2101                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2102                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2103         region_params.fw_inst_const =
2104                 adev->dm.dmub_fw->data +
2105                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2106                 PSP_HEADER_BYTES;
2107
2108         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2109                                            &region_info);
2110
2111         if (status != DMUB_STATUS_OK) {
2112                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2113                 return -EINVAL;
2114         }
2115
2116         /*
2117          * Allocate a framebuffer based on the total size of all the regions.
2118          * TODO: Move this into GART.
2119          */
2120         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2121                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2122                                     &adev->dm.dmub_bo_gpu_addr,
2123                                     &adev->dm.dmub_bo_cpu_addr);
2124         if (r)
2125                 return r;
2126
2127         /* Rebase the regions on the framebuffer address. */
2128         memset(&fb_params, 0, sizeof(fb_params));
2129         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2130         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2131         fb_params.region_info = &region_info;
2132
2133         adev->dm.dmub_fb_info =
2134                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2135         fb_info = adev->dm.dmub_fb_info;
2136
2137         if (!fb_info) {
2138                 DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
2140                 return -ENOMEM;
2141         }
2142
2143         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2144         if (status != DMUB_STATUS_OK) {
2145                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2146                 return -EINVAL;
2147         }
2148
2149         return 0;
2150 }
2151
2152 static int dm_sw_init(void *handle)
2153 {
2154         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2155         int r;
2156
2157         r = dm_dmub_sw_init(adev);
2158         if (r)
2159                 return r;
2160
2161         return load_dmcu_fw(adev);
2162 }
2163
2164 static int dm_sw_fini(void *handle)
2165 {
2166         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2167
2168         kfree(adev->dm.dmub_fb_info);
2169         adev->dm.dmub_fb_info = NULL;
2170
2171         if (adev->dm.dmub_srv) {
2172                 dmub_srv_destroy(adev->dm.dmub_srv);
2173                 adev->dm.dmub_srv = NULL;
2174         }
2175
2176         release_firmware(adev->dm.dmub_fw);
2177         adev->dm.dmub_fw = NULL;
2178
2179         release_firmware(adev->dm.fw_dmcu);
2180         adev->dm.fw_dmcu = NULL;
2181
2182         return 0;
2183 }
2184
2185 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2186 {
2187         struct amdgpu_dm_connector *aconnector;
2188         struct drm_connector *connector;
2189         struct drm_connector_list_iter iter;
2190         int ret = 0;
2191
2192         drm_connector_list_iter_begin(dev, &iter);
2193         drm_for_each_connector_iter(connector, &iter) {
2194                 aconnector = to_amdgpu_dm_connector(connector);
2195                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2196                     aconnector->mst_mgr.aux) {
2197                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2198                                          aconnector,
2199                                          aconnector->base.base.id);
2200
2201                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2202                         if (ret < 0) {
2203                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2204                                 aconnector->dc_link->type =
2205                                         dc_connection_single;
2206                                 break;
2207                         }
2208                 }
2209         }
2210         drm_connector_list_iter_end(&iter);
2211
2212         return ret;
2213 }
2214
2215 static int dm_late_init(void *handle)
2216 {
2217         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2218
2219         struct dmcu_iram_parameters params;
2220         unsigned int linear_lut[16];
2221         int i;
2222         struct dmcu *dmcu = NULL;
2223
2224         dmcu = adev->dm.dc->res_pool->dmcu;
2225
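        /* Build a 16-entry linear LUT spanning 0x0000..0xFFFF (i = 15 yields 0xFFFF). */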
2226         for (i = 0; i < 16; i++)
2227                 linear_lut[i] = 0xFFFF * i / 15;
2228
2229         params.set = 0;
2230         params.backlight_ramping_override = false;
2231         params.backlight_ramping_start = 0xCCCC;
2232         params.backlight_ramping_reduction = 0xCCCCCCCC;
2233         params.backlight_lut_array_size = 16;
2234         params.backlight_lut_array = linear_lut;
2235
2236         /* Min backlight level after ABM reduction; don't allow below 1%:
2237          * 0xFFFF x 0.01 = 0x28F
2238          */
2239         params.min_abm_backlight = 0x28F;
2240         /* In the case where ABM is implemented on DMCUB,
2241          * the dmcu object will be NULL.
2242          * ABM 2.4 and up are implemented on DMCUB.
2243          */
2244         if (dmcu) {
2245                 if (!dmcu_load_iram(dmcu, params))
2246                         return -EINVAL;
2247         } else if (adev->dm.dc->ctx->dmub_srv) {
2248                 struct dc_link *edp_links[MAX_NUM_EDP];
2249                 int edp_num;
2250
2251                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2252                 for (i = 0; i < edp_num; i++) {
2253                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2254                                 return -EINVAL;
2255                 }
2256         }
2257
2258         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2259 }
2260
2261 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2262 {
2263         struct amdgpu_dm_connector *aconnector;
2264         struct drm_connector *connector;
2265         struct drm_connector_list_iter iter;
2266         struct drm_dp_mst_topology_mgr *mgr;
2267         int ret;
2268         bool need_hotplug = false;
2269
2270         drm_connector_list_iter_begin(dev, &iter);
2271         drm_for_each_connector_iter(connector, &iter) {
2272                 aconnector = to_amdgpu_dm_connector(connector);
2273                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2274                     aconnector->mst_port)
2275                         continue;
2276
2277                 mgr = &aconnector->mst_mgr;
2278
2279                 if (suspend) {
2280                         drm_dp_mst_topology_mgr_suspend(mgr);
2281                 } else {
2282                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2283                         if (ret < 0) {
2284                                 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2285                                         aconnector->dc_link);
2286                                 need_hotplug = true;
2287                         }
2288                 }
2289         }
2290         drm_connector_list_iter_end(&iter);
2291
2292         if (need_hotplug)
2293                 drm_kms_helper_hotplug_event(dev);
2294 }
2295
2296 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2297 {
2298         int ret = 0;
2299
2300         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2301          * depends on the Windows driver dc implementation.
2302          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
2303          * settings should be passed to smu during boot up and on resume from s3.
2304          * Boot up: dc calculates the dcn watermark clock settings within
2305          * dc_create (dcn20_resource_construct), then calls the pplib functions
2306          * below to pass the settings to smu:
2307          * smu_set_watermarks_for_clock_ranges
2308          * smu_set_watermarks_table
2309          * navi10_set_watermarks_table
2310          * smu_write_watermarks_table
2311          *
2312          * For Renoir, the clock settings of the dcn watermarks are also fixed
2313          * values. dc has implemented a different flow for the Windows driver:
2314          * dc_hardware_init / dc_set_power_state
2315          * dcn10_init_hw
2316          * notify_wm_ranges
2317          * set_wm_ranges
2318          * -- Linux
2319          * smu_set_watermarks_for_clock_ranges
2320          * renoir_set_watermarks_table
2321          * smu_write_watermarks_table
2322          *
2323          * For Linux,
2324          * dc_hardware_init -> amdgpu_dm_init
2325          * dc_set_power_state --> dm_resume
2326          *
2327          * Therefore, this function applies to navi10/12/14 but not to Renoir.
2328          */
2330         switch (adev->ip_versions[DCE_HWIP][0]) {
2331         case IP_VERSION(2, 0, 2):
2332         case IP_VERSION(2, 0, 0):
2333                 break;
2334         default:
2335                 return 0;
2336         }
2337
2338         ret = amdgpu_dpm_write_watermarks_table(adev);
2339         if (ret) {
2340                 DRM_ERROR("Failed to update WMTABLE!\n");
2341                 return ret;
2342         }
2343
2344         return 0;
2345 }
2346
2347 /**
2348  * dm_hw_init() - Initialize DC device
2349  * @handle: The base driver device containing the amdgpu_dm device.
2350  *
2351  * Initialize the &struct amdgpu_display_manager device. This involves calling
2352  * the initializers of each DM component, then populating the struct with them.
2353  *
2354  * Although the function implies hardware initialization, both hardware and
2355  * software are initialized here. Splitting them out to their relevant init
2356  * hooks is a future TODO item.
2357  *
2358  * Some notable things that are initialized here:
2359  *
2360  * - Display Core, both software and hardware
2361  * - DC modules that we need (freesync and color management)
2362  * - DRM software states
2363  * - Interrupt sources and handlers
2364  * - Vblank support
2365  * - Debug FS entries, if enabled
2366  */
2367 static int dm_hw_init(void *handle)
2368 {
2369         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2370         /* Create DAL display manager */
2371         amdgpu_dm_init(adev);
2372         amdgpu_dm_hpd_init(adev);
2373
2374         return 0;
2375 }
2376
2377 /**
2378  * dm_hw_fini() - Teardown DC device
2379  * @handle: The base driver device containing the amdgpu_dm device.
2380  *
2381  * Teardown components within &struct amdgpu_display_manager that require
2382  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2383  * were loaded. Also flush IRQ workqueues and disable them.
2384  */
2385 static int dm_hw_fini(void *handle)
2386 {
2387         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2388
2389         amdgpu_dm_hpd_fini(adev);
2390
2391         amdgpu_dm_irq_fini(adev);
2392         amdgpu_dm_fini(adev);
2393         return 0;
2394 }
2395
2396
2397 static int dm_enable_vblank(struct drm_crtc *crtc);
2398 static void dm_disable_vblank(struct drm_crtc *crtc);
2399
2400 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2401                                  struct dc_state *state, bool enable)
2402 {
2403         enum dc_irq_source irq_source;
2404         struct amdgpu_crtc *acrtc;
2405         int rc = -EBUSY;
2406         int i = 0;
2407
2408         for (i = 0; i < state->stream_count; i++) {
2409                 acrtc = get_crtc_by_otg_inst(
2410                                 adev, state->stream_status[i].primary_otg_inst);
2411
2412                 if (acrtc && state->stream_status[i].plane_count != 0) {
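                        /* Pageflip IRQ sources are laid out per OTG instance. */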
2413                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2414                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2415                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2416                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2417                         if (rc)
2418                                 DRM_WARN("Failed to %s pflip interrupts\n",
2419                                          enable ? "enable" : "disable");
2420
2421                         if (enable) {
2422                                 rc = dm_enable_vblank(&acrtc->base);
2423                                 if (rc)
2424                                         DRM_WARN("Failed to enable vblank interrupts\n");
2425                         } else {
2426                                 dm_disable_vblank(&acrtc->base);
2427                         }
2429                 }
2430         }
2432 }
2433
2434 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2435 {
2436         struct dc_state *context = NULL;
2437         enum dc_status res = DC_ERROR_UNEXPECTED;
2438         int i;
2439         struct dc_stream_state *del_streams[MAX_PIPES];
2440         int del_streams_count = 0;
2441
2442         memset(del_streams, 0, sizeof(del_streams));
2443
2444         context = dc_create_state(dc);
2445         if (context == NULL)
2446                 goto context_alloc_fail;
2447
2448         dc_resource_state_copy_construct_current(dc, context);
2449
2450         /* First remove from context all streams */
2451         for (i = 0; i < context->stream_count; i++) {
2452                 struct dc_stream_state *stream = context->streams[i];
2453
2454                 del_streams[del_streams_count++] = stream;
2455         }
2456
2457         /* Remove all planes for removed streams and then remove the streams */
2458         for (i = 0; i < del_streams_count; i++) {
2459                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2460                         res = DC_FAIL_DETACH_SURFACES;
2461                         goto fail;
2462                 }
2463
2464                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2465                 if (res != DC_OK)
2466                         goto fail;
2467         }
2468
2469         res = dc_commit_state(dc, context);
2470
2471 fail:
2472         dc_release_state(context);
2473
2474 context_alloc_fail:
2475         return res;
2476 }
2477
2478 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2479 {
2480         int i;
2481
2482         if (dm->hpd_rx_offload_wq) {
2483                 for (i = 0; i < dm->dc->caps.max_links; i++)
2484                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2485         }
2486 }
2487
2488 static int dm_suspend(void *handle)
2489 {
2490         struct amdgpu_device *adev = handle;
2491         struct amdgpu_display_manager *dm = &adev->dm;
2492         int ret = 0;
2493
2494         if (amdgpu_in_reset(adev)) {
2495                 mutex_lock(&dm->dc_lock);
2496
2497                 dc_allow_idle_optimizations(adev->dm.dc, false);
2498
2499                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2500
2501                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2502
2503                 amdgpu_dm_commit_zero_streams(dm->dc);
2504
2505                 amdgpu_dm_irq_suspend(adev);
2506
2507                 hpd_rx_irq_work_suspend(dm);
2508
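                /*
                 * dc_lock is intentionally left held across the GPU reset
                 * path; the matching mutex_unlock() is in dm_resume().
                 */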
2509                 return ret;
2510         }
2511
2512         WARN_ON(adev->dm.cached_state);
2513         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2514
2515         s3_handle_mst(adev_to_drm(adev), true);
2516
2517         amdgpu_dm_irq_suspend(adev);
2518
2519         hpd_rx_irq_work_suspend(dm);
2520
2521         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2522
2523         return 0;
2524 }
2525
2526 struct amdgpu_dm_connector *
2527 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2528                                              struct drm_crtc *crtc)
2529 {
2530         uint32_t i;
2531         struct drm_connector_state *new_con_state;
2532         struct drm_connector *connector;
2533         struct drm_crtc *crtc_from_state;
2534
2535         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2536                 crtc_from_state = new_con_state->crtc;
2537
2538                 if (crtc_from_state == crtc)
2539                         return to_amdgpu_dm_connector(connector);
2540         }
2541
2542         return NULL;
2543 }
2544
2545 static void emulated_link_detect(struct dc_link *link)
2546 {
2547         struct dc_sink_init_data sink_init_data = { 0 };
2548         struct display_sink_capability sink_caps = { 0 };
2549         enum dc_edid_status edid_status;
2550         struct dc_context *dc_ctx = link->ctx;
2551         struct dc_sink *sink = NULL;
2552         struct dc_sink *prev_sink = NULL;
2553
2554         link->type = dc_connection_none;
2555         prev_sink = link->local_sink;
2556
2557         if (prev_sink)
2558                 dc_sink_release(prev_sink);
2559
2560         switch (link->connector_signal) {
2561         case SIGNAL_TYPE_HDMI_TYPE_A: {
2562                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2563                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2564                 break;
2565         }
2566
2567         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2568                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2569                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2570                 break;
2571         }
2572
2573         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2574                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2575                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2576                 break;
2577         }
2578
2579         case SIGNAL_TYPE_LVDS: {
2580                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2581                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2582                 break;
2583         }
2584
2585         case SIGNAL_TYPE_EDP: {
2586                 sink_caps.transaction_type =
2587                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2588                 sink_caps.signal = SIGNAL_TYPE_EDP;
2589                 break;
2590         }
2591
2592         case SIGNAL_TYPE_DISPLAY_PORT: {
2593                 sink_caps.transaction_type =
2594                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2595                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2596                 break;
2597         }
2598
2599         default:
2600                 DC_ERROR("Invalid connector type! signal:%d\n",
2601                         link->connector_signal);
2602                 return;
2603         }
2604
2605         sink_init_data.link = link;
2606         sink_init_data.sink_signal = sink_caps.signal;
2607
2608         sink = dc_sink_create(&sink_init_data);
2609         if (!sink) {
2610                 DC_ERROR("Failed to create sink!\n");
2611                 return;
2612         }
2613
2614         /* dc_sink_create returns a new reference */
2615         link->local_sink = sink;
2616
2617         edid_status = dm_helpers_read_local_edid(
2618                         link->ctx,
2619                         link,
2620                         sink);
2621
2622         if (edid_status != EDID_OK)
2623                 DC_ERROR("Failed to read EDID\n");
2624
2625 }
2626
2627 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2628                                      struct amdgpu_display_manager *dm)
2629 {
2630         struct {
2631                 struct dc_surface_update surface_updates[MAX_SURFACES];
2632                 struct dc_plane_info plane_infos[MAX_SURFACES];
2633                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2634                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2635                 struct dc_stream_update stream_update;
2636         } *bundle;
2637         int k, m;
2638
2639         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2640
2641         if (!bundle) {
2642                 dm_error("Failed to allocate update bundle\n");
2643                 goto cleanup;
2644         }
2645
2646         for (k = 0; k < dc_state->stream_count; k++) {
2647                 bundle->stream_update.stream = dc_state->streams[k];
2648
2649                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2650                         bundle->surface_updates[m].surface =
2651                                 dc_state->stream_status[k].plane_states[m];
2652                         bundle->surface_updates[m].surface->force_full_update =
2653                                 true;
2654                 }
2655                 dc_commit_updates_for_stream(
2656                         dm->dc, bundle->surface_updates,
2657                         dc_state->stream_status[k].plane_count,
2658                         dc_state->streams[k], &bundle->stream_update, dc_state);
2659         }
2660
2661 cleanup:
2662         kfree(bundle);
2663
2664         return;
2665 }
2666
2667 static int dm_resume(void *handle)
2668 {
2669         struct amdgpu_device *adev = handle;
2670         struct drm_device *ddev = adev_to_drm(adev);
2671         struct amdgpu_display_manager *dm = &adev->dm;
2672         struct amdgpu_dm_connector *aconnector;
2673         struct drm_connector *connector;
2674         struct drm_connector_list_iter iter;
2675         struct drm_crtc *crtc;
2676         struct drm_crtc_state *new_crtc_state;
2677         struct dm_crtc_state *dm_new_crtc_state;
2678         struct drm_plane *plane;
2679         struct drm_plane_state *new_plane_state;
2680         struct dm_plane_state *dm_new_plane_state;
2681         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2682         enum dc_connection_type new_connection_type = dc_connection_none;
2683         struct dc_state *dc_state;
2684         int i, r, j;
2685
2686         if (amdgpu_in_reset(adev)) {
2687                 dc_state = dm->cached_dc_state;
2688
2689                 /*
2690                  * The dc->current_state is backed up into dm->cached_dc_state
2691                  * before we commit 0 streams.
2692                  *
2693                  * DC will clear link encoder assignments on the real state
2694                  * but the changes won't propagate over to the copy we made
2695                  * before the 0 streams commit.
2696                  *
2697                  * DC expects that link encoder assignments are *not* valid
2698                  * when committing a state, so as a workaround we can copy
2699                  * off of the current state.
2700                  *
2701                  * We lose the previous assignments, but we had already
2702                  * committed 0 streams anyway.
2703                  */
2704                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2705
2706                 r = dm_dmub_hw_init(adev);
2707                 if (r)
2708                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2709
2710                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2711                 dc_resume(dm->dc);
2712
2713                 amdgpu_dm_irq_resume_early(adev);
2714
2715                 for (i = 0; i < dc_state->stream_count; i++) {
2716                         dc_state->streams[i]->mode_changed = true;
2717                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2718                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2719                                         = 0xffffffff;
2720                         }
2721                 }
2722
2723                 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2724                         amdgpu_dm_outbox_init(adev);
2725                         dc_enable_dmub_outbox(adev->dm.dc);
2726                 }
2727
2728                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2729
2730                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2731
2732                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2733
2734                 dc_release_state(dm->cached_dc_state);
2735                 dm->cached_dc_state = NULL;
2736
2737                 amdgpu_dm_irq_resume_late(adev);
2738
2739                 mutex_unlock(&dm->dc_lock);
2740
2741                 return 0;
2742         }
2743         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2744         dc_release_state(dm_state->context);
2745         dm_state->context = dc_create_state(dm->dc);
2746         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2747         dc_resource_state_construct(dm->dc, dm_state->context);
2748
2749         /* Before powering on DC we need to re-initialize DMUB. */
2750         dm_dmub_hw_resume(adev);
2751
2752         /* Re-enable outbox interrupts for DPIA. */
2753         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2754                 amdgpu_dm_outbox_init(adev);
2755                 dc_enable_dmub_outbox(adev->dm.dc);
2756         }
2757
2758         /* power on hardware */
2759         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2760
2761         /* program HPD filter */
2762         dc_resume(dm->dc);
2763
2764         /*
2765          * Enable HPD Rx IRQ early; this must be done before set mode, as
2766          * short-pulse interrupts are used for MST.
2767          */
2768         amdgpu_dm_irq_resume_early(adev);
2769
2770         /* On resume we need to rewrite the MSTM control bits to enable MST */
2771         s3_handle_mst(ddev, false);
2772
2773         /* Do detection */
2774         drm_connector_list_iter_begin(ddev, &iter);
2775         drm_for_each_connector_iter(connector, &iter) {
2776                 aconnector = to_amdgpu_dm_connector(connector);
2777
2778                 /*
2779                  * This is the case when traversing through already created
2780                  * MST connectors; they should be skipped.
2781                  */
2782                 if (aconnector->dc_link &&
2783                     aconnector->dc_link->type == dc_connection_mst_branch)
2784                         continue;
2785
2786                 mutex_lock(&aconnector->hpd_lock);
2787                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2788                         DRM_ERROR("KMS: Failed to detect connector\n");
2789
2790                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2791                         emulated_link_detect(aconnector->dc_link);
2792                 } else {
2793                         mutex_lock(&dm->dc_lock);
2794                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2795                         mutex_unlock(&dm->dc_lock);
2796                 }
2797
2798                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2799                         aconnector->fake_enable = false;
2800
2801                 if (aconnector->dc_sink)
2802                         dc_sink_release(aconnector->dc_sink);
2803                 aconnector->dc_sink = NULL;
2804                 amdgpu_dm_update_connector_after_detect(aconnector);
2805                 mutex_unlock(&aconnector->hpd_lock);
2806         }
2807         drm_connector_list_iter_end(&iter);
2808
2809         /* Force mode set in atomic commit */
2810         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2811                 new_crtc_state->active_changed = true;
2812
2813         /*
2814          * atomic_check is expected to create the dc states. We need to release
2815          * them here, since they were duplicated as part of the suspend
2816          * procedure.
2817          */
2818         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2819                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2820                 if (dm_new_crtc_state->stream) {
2821                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2822                         dc_stream_release(dm_new_crtc_state->stream);
2823                         dm_new_crtc_state->stream = NULL;
2824                 }
2825         }
2826
2827         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2828                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2829                 if (dm_new_plane_state->dc_state) {
2830                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2831                         dc_plane_state_release(dm_new_plane_state->dc_state);
2832                         dm_new_plane_state->dc_state = NULL;
2833                 }
2834         }
2835
2836         drm_atomic_helper_resume(ddev, dm->cached_state);
2837
2838         dm->cached_state = NULL;
2839
2840         amdgpu_dm_irq_resume_late(adev);
2841
2842         amdgpu_dm_smu_write_watermarks_table(adev);
2843
2844         return 0;
2845 }
2846
2847 /**
2848  * DOC: DM Lifecycle
2849  *
2850  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2851  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2852  * the base driver's device list to be initialized and torn down accordingly.
2853  *
2854  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2855  */
2856
2857 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2858         .name = "dm",
2859         .early_init = dm_early_init,
2860         .late_init = dm_late_init,
2861         .sw_init = dm_sw_init,
2862         .sw_fini = dm_sw_fini,
2863         .early_fini = amdgpu_dm_early_fini,
2864         .hw_init = dm_hw_init,
2865         .hw_fini = dm_hw_fini,
2866         .suspend = dm_suspend,
2867         .resume = dm_resume,
2868         .is_idle = dm_is_idle,
2869         .wait_for_idle = dm_wait_for_idle,
2870         .check_soft_reset = dm_check_soft_reset,
2871         .soft_reset = dm_soft_reset,
2872         .set_clockgating_state = dm_set_clockgating_state,
2873         .set_powergating_state = dm_set_powergating_state,
2874 };
2875
2876 const struct amdgpu_ip_block_version dm_ip_block =
2877 {
2878         .type = AMD_IP_BLOCK_TYPE_DCE,
2879         .major = 1,
2880         .minor = 0,
2881         .rev = 0,
2882         .funcs = &amdgpu_dm_funcs,
2883 };
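
/*
 * For reference, the amdgpu base driver registers this IP block during
 * device init roughly as:
 *
 *     amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which the amd_ip_funcs hooks above are driven through the normal
 * device lifecycle (init, suspend/resume, reset and fini).
 */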
2884
2885
2886 /**
2887  * DOC: atomic
2888  *
2889  * *WIP*
2890  */
2891
2892 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2893         .fb_create = amdgpu_display_user_framebuffer_create,
2894         .get_format_info = amd_get_format_info,
2895         .output_poll_changed = drm_fb_helper_output_poll_changed,
2896         .atomic_check = amdgpu_dm_atomic_check,
2897         .atomic_commit = drm_atomic_helper_commit,
2898 };
2899
2900 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2901         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2902 };
2903
2904 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2905 {
2906         u32 max_avg, min_cll, max, min, q, r;
2907         struct amdgpu_dm_backlight_caps *caps;
2908         struct amdgpu_display_manager *dm;
2909         struct drm_connector *conn_base;
2910         struct amdgpu_device *adev;
2911         struct dc_link *link = NULL;
2912         static const u8 pre_computed_values[] = {
2913                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2914                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2915         int i;
2916
2917         if (!aconnector || !aconnector->dc_link)
2918                 return;
2919
2920         link = aconnector->dc_link;
2921         if (link->connector_signal != SIGNAL_TYPE_EDP)
2922                 return;
2923
2924         conn_base = &aconnector->base;
2925         adev = drm_to_adev(conn_base->dev);
2926         dm = &adev->dm;
2927         for (i = 0; i < dm->num_of_edps; i++) {
2928                 if (link == dm->backlight_link[i])
2929                         break;
2930         }
2931         if (i >= dm->num_of_edps)
2932                 return;
2933         caps = &dm->backlight_caps[i];
2934         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2935         caps->aux_support = false;
2936         max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2937         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2938
2939         if (caps->ext_caps->bits.oled == 1 /*||
2940             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2941             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2942                 caps->aux_support = true;
2943
2944         if (amdgpu_backlight == 0)
2945                 caps->aux_support = false;
2946         else if (amdgpu_backlight == 1)
2947                 caps->aux_support = true;
2948
2949         /* From the specification (CTA-861-G), the maximum luminance is
2950          * calculated as:
2951          *      Luminance = 50*2**(CV/32)
2952          * where CV is a one-byte value.
2953          * Evaluating this expression directly would require floating-point
2954          * precision; to avoid that complexity, we take advantage of the fact
2955          * that CV is divided by a constant. By Euclid's division algorithm,
2956          * CV can be written as CV = 32*q + r. Substituting this into the
2957          * luminance expression gives 50*(2**q)*(2**(r/32)), so we only
2958          * need to pre-compute the values of 50*(2**(r/32)). These were
2959          * generated with the following Ruby line:
2960          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2961          * and the results can be verified against
2962          * pre_computed_values.
2963          */
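        /*
         * Worked example with a hypothetical value: for max_avg = 100,
         * q = 100 >> 5 = 3 and r = 100 % 32 = 4, so
         * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440,
         * close to the exact 50*2**(100/32) ~= 436.
         */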
2964         q = max_avg >> 5;
2965         r = max_avg % 32;
2966         max = (1 << q) * pre_computed_values[r];
2967
2968         // min luminance: maxLum * (CV/255)^2 / 100
2969         q = min_cll * min_cll;
2970         min = DIV_ROUND_CLOSEST(max * q, 255 * 255 * 100);
2971
2972         caps->aux_max_input_signal = max;
2973         caps->aux_min_input_signal = min;
2974 }
2975
2976 void amdgpu_dm_update_connector_after_detect(
2977                 struct amdgpu_dm_connector *aconnector)
2978 {
2979         struct drm_connector *connector = &aconnector->base;
2980         struct drm_device *dev = connector->dev;
2981         struct dc_sink *sink;
2982
2983         /* MST handled by drm_mst framework */
2984         if (aconnector->mst_mgr.mst_state)
2985                 return;
2986
2987         sink = aconnector->dc_link->local_sink;
2988         if (sink)
2989                 dc_sink_retain(sink);
2990
2991         /*
2992          * An EDID-managed connector gets its first update only in the mode_valid
2993          * hook; the connector sink is then set to either the fake or the physical
2994          * sink, depending on link status. Skip if already done during boot.
2995          */
2996         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2997                         && aconnector->dc_em_sink) {
2998
2999                 /*
3000                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
3001                  * fake a stream, because the connector's sink is set to NULL on resume.
3002                  */
3003                 mutex_lock(&dev->mode_config.mutex);
3004
3005                 if (sink) {
3006                         if (aconnector->dc_sink) {
3007                                 amdgpu_dm_update_freesync_caps(connector, NULL);
3008                                 /*
3009                                  * The retain and release below bump up the
3010                                  * sink's refcount because the link no longer points
3011                                  * to it after disconnect; otherwise the next crtc-to-connector
3012                                  * reshuffle by the UMD would trigger an unwanted dc_sink release.
3013                                  */
3014                                 dc_sink_release(aconnector->dc_sink);
3015                         }
3016                         aconnector->dc_sink = sink;
3017                         dc_sink_retain(aconnector->dc_sink);
3018                         amdgpu_dm_update_freesync_caps(connector,
3019                                         aconnector->edid);
3020                 } else {
3021                         amdgpu_dm_update_freesync_caps(connector, NULL);
3022                         if (!aconnector->dc_sink) {
3023                                 aconnector->dc_sink = aconnector->dc_em_sink;
3024                                 dc_sink_retain(aconnector->dc_sink);
3025                         }
3026                 }
3027
3028                 mutex_unlock(&dev->mode_config.mutex);
3029
3030                 if (sink)
3031                         dc_sink_release(sink);
3032                 return;
3033         }
3034
3035         /*
3036          * TODO: temporary guard while looking for a proper fix:
3037          * if this sink is an MST sink, we should not do anything.
3038          */
3039         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3040                 dc_sink_release(sink);
3041                 return;
3042         }
3043
3044         if (aconnector->dc_sink == sink) {
3045                 /*
3046                  * We got a DP short pulse (Link Loss, DP CTS, etc.).
3047                  * Do nothing.
3048                  */
3049                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3050                                 aconnector->connector_id);
3051                 if (sink)
3052                         dc_sink_release(sink);
3053                 return;
3054         }
3055
3056         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3057                 aconnector->connector_id, aconnector->dc_sink, sink);
3058
3059         mutex_lock(&dev->mode_config.mutex);
3060
3061         /*
3062          * 1. Update status of the drm connector
3063          * 2. Send an event and let userspace tell us what to do
3064          */
3065         if (sink) {
3066                 /*
3067                  * TODO: check if we still need the S3 mode update workaround.
3068                  * If yes, put it here.
3069                  */
3070                 if (aconnector->dc_sink) {
3071                         amdgpu_dm_update_freesync_caps(connector, NULL);
3072                         dc_sink_release(aconnector->dc_sink);
3073                 }
3074
3075                 aconnector->dc_sink = sink;
3076                 dc_sink_retain(aconnector->dc_sink);
3077                 if (sink->dc_edid.length == 0) {
3078                         aconnector->edid = NULL;
3079                         if (aconnector->dc_link->aux_mode) {
3080                                 drm_dp_cec_unset_edid(
3081                                         &aconnector->dm_dp_aux.aux);
3082                         }
3083                 } else {
3084                         aconnector->edid =
3085                                 (struct edid *)sink->dc_edid.raw_edid;
3086
3087                         if (aconnector->dc_link->aux_mode)
3088                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3089                                                     aconnector->edid);
3090                 }
3091
3092                 drm_connector_update_edid_property(connector, aconnector->edid);
3093                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3094                 update_connector_ext_caps(aconnector);
3095         } else {
3096                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3097                 amdgpu_dm_update_freesync_caps(connector, NULL);
3098                 drm_connector_update_edid_property(connector, NULL);
3099                 aconnector->num_modes = 0;
3100                 dc_sink_release(aconnector->dc_sink);
3101                 aconnector->dc_sink = NULL;
3102                 aconnector->edid = NULL;
3103 #ifdef CONFIG_DRM_AMD_DC_HDCP
3104                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3105                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3106                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3107 #endif
3108         }
3109
3110         mutex_unlock(&dev->mode_config.mutex);
3111
3112         update_subconnector_property(aconnector);
3113
3114         if (sink)
3115                 dc_sink_release(sink);
3116 }
3117
3118 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3119 {
3120         struct drm_connector *connector = &aconnector->base;
3121         struct drm_device *dev = connector->dev;
3122         enum dc_connection_type new_connection_type = dc_connection_none;
3123         struct amdgpu_device *adev = drm_to_adev(dev);
3124 #ifdef CONFIG_DRM_AMD_DC_HDCP
3125         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3126 #endif
3127         bool ret = false;
3128
3129         if (adev->dm.disable_hpd_irq)
3130                 return;
3131
3132         /*
3133          * In case of failure or for MST there is no need to update the connector
3134          * status or notify the OS, since MST handles this in its own context.
3135          */
3136         mutex_lock(&aconnector->hpd_lock);
3137
3138 #ifdef CONFIG_DRM_AMD_DC_HDCP
3139         if (adev->dm.hdcp_workqueue) {
3140                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3141                 dm_con_state->update_hdcp = true;
3142         }
3143 #endif
3144         if (aconnector->fake_enable)
3145                 aconnector->fake_enable = false;
3146
3147         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3148                 DRM_ERROR("KMS: Failed to detect connector\n");
3149
3150         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3151                 emulated_link_detect(aconnector->dc_link);
3152
3153                 drm_modeset_lock_all(dev);
3154                 dm_restore_drm_connector_state(dev, connector);
3155                 drm_modeset_unlock_all(dev);
3156
3157                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3158                         drm_kms_helper_connector_hotplug_event(connector);
3159         } else {
3160                 mutex_lock(&adev->dm.dc_lock);
3161                 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3162                 mutex_unlock(&adev->dm.dc_lock);
3163                 if (ret) {
3164                         amdgpu_dm_update_connector_after_detect(aconnector);
3165
3166                         drm_modeset_lock_all(dev);
3167                         dm_restore_drm_connector_state(dev, connector);
3168                         drm_modeset_unlock_all(dev);
3169
3170                         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3171                                 drm_kms_helper_connector_hotplug_event(connector);
3172                 }
3173         }
3174         mutex_unlock(&aconnector->hpd_lock);
3175
3176 }
3177
3178 static void handle_hpd_irq(void *param)
3179 {
3180         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3181
3182         handle_hpd_irq_helper(aconnector);
3183
3184 }
3185
3186 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3187 {
3188         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3189         uint8_t dret;
3190         bool new_irq_handled = false;
3191         int dpcd_addr;
3192         int dpcd_bytes_to_read;
3193
3194         const int max_process_count = 30;
3195         int process_count = 0;
3196
3197         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3198
3199         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3200                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3201                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3202                 dpcd_addr = DP_SINK_COUNT;
3203         } else {
3204                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3205                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3206                 dpcd_addr = DP_SINK_COUNT_ESI;
3207         }
3208
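        /*
         * With the ESI window this reads the 4 bytes at DPCD 0x2002-0x2005;
         * pre-1.2 sinks instead get the 2 bytes at 0x200-0x201. The ACK
         * below writes back dpcd_bytes_to_read - 1 bytes at dpcd_addr + 1.
         */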
3209         dret = drm_dp_dpcd_read(
3210                 &aconnector->dm_dp_aux.aux,
3211                 dpcd_addr,
3212                 esi,
3213                 dpcd_bytes_to_read);
3214
3215         while (dret == dpcd_bytes_to_read &&
3216                 process_count < max_process_count) {
3217                 uint8_t retry;
3218                 dret = 0;
3219
3220                 process_count++;
3221
3222                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3223                 /* handle HPD short pulse irq */
3224                 if (aconnector->mst_mgr.mst_state)
3225                         drm_dp_mst_hpd_irq(
3226                                 &aconnector->mst_mgr,
3227                                 esi,
3228                                 &new_irq_handled);
3229
3230                 if (new_irq_handled) {
3231                         /* ACK at DPCD to notify downstream */
3232                         const int ack_dpcd_bytes_to_write =
3233                                 dpcd_bytes_to_read - 1;
3234
3235                         for (retry = 0; retry < 3; retry++) {
3236                                 uint8_t wret;
3237
3238                                 wret = drm_dp_dpcd_write(
3239                                         &aconnector->dm_dp_aux.aux,
3240                                         dpcd_addr + 1,
3241                                         &esi[1],
3242                                         ack_dpcd_bytes_to_write);
3243                                 if (wret == ack_dpcd_bytes_to_write)
3244                                         break;
3245                         }
3246
3247                         /* check if there is new irq to be handled */
3248                         dret = drm_dp_dpcd_read(
3249                                 &aconnector->dm_dp_aux.aux,
3250                                 dpcd_addr,
3251                                 esi,
3252                                 dpcd_bytes_to_read);
3253
3254                         new_irq_handled = false;
3255                 } else {
3256                         break;
3257                 }
3258         }
3259
3260         if (process_count == max_process_count)
3261                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3262 }
3263
3264 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3265                                                         union hpd_irq_data hpd_irq_data)
3266 {
3267         struct hpd_rx_irq_offload_work *offload_work =
3268                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3269
3270         if (!offload_work) {
3271                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3272                 return;
3273         }
3274
3275         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3276         offload_work->data = hpd_irq_data;
3277         offload_work->offload_wq = offload_wq;
3278
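        /*
         * Ownership of offload_work passes to the work handler
         * (dm_handle_hpd_rx_offload_work), which frees it on completion.
         */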
3279         queue_work(offload_wq->wq, &offload_work->work);
3280         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3281 }
3282
3283 static void handle_hpd_rx_irq(void *param)
3284 {
3285         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3286         struct drm_connector *connector = &aconnector->base;
3287         struct drm_device *dev = connector->dev;
3288         struct dc_link *dc_link = aconnector->dc_link;
3289         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3290         bool result = false;
3291         enum dc_connection_type new_connection_type = dc_connection_none;
3292         struct amdgpu_device *adev = drm_to_adev(dev);
3293         union hpd_irq_data hpd_irq_data;
3294         bool link_loss = false;
3295         bool has_left_work = false;
3296         int idx = aconnector->base.index;
3297         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3298
3299         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3300
3301         if (adev->dm.disable_hpd_irq)
3302                 return;
3303
3304         /*
3305          * TODO: Temporarily take a mutex to protect the HPD interrupt from
3306          * GPIO conflicts; once an i2c helper is implemented, this mutex
3307          * should be retired.
3308          */
3309         mutex_lock(&aconnector->hpd_lock);
3310
3311         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3312                                                 &link_loss, true, &has_left_work);
3313
3314         if (!has_left_work)
3315                 goto out;
3316
3317         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3318                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3319                 goto out;
3320         }
3321
3322         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3323                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3324                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3325                         dm_handle_mst_sideband_msg(aconnector);
3326                         goto out;
3327                 }
3328
3329                 if (link_loss) {
3330                         bool skip = false;
3331
3332                         spin_lock(&offload_wq->offload_lock);
3333                         skip = offload_wq->is_handling_link_loss;
3334
3335                         if (!skip)
3336                                 offload_wq->is_handling_link_loss = true;
3337
3338                         spin_unlock(&offload_wq->offload_lock);
3339
3340                         if (!skip)
3341                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3342
3343                         goto out;
3344                 }
3345         }
3346
3347 out:
3348         if (result && !is_mst_root_connector) {
3349                 /* Downstream Port status changed. */
3350                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3351                         DRM_ERROR("KMS: Failed to detect connector\n");
3352
3353                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3354                         emulated_link_detect(dc_link);
3355
3356                         if (aconnector->fake_enable)
3357                                 aconnector->fake_enable = false;
3358
3359                         amdgpu_dm_update_connector_after_detect(aconnector);
3360
3361
3362                         drm_modeset_lock_all(dev);
3363                         dm_restore_drm_connector_state(dev, connector);
3364                         drm_modeset_unlock_all(dev);
3365
3366                         drm_kms_helper_connector_hotplug_event(connector);
3367                 } else {
3368                         bool ret = false;
3369
3370                         mutex_lock(&adev->dm.dc_lock);
3371                         ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3372                         mutex_unlock(&adev->dm.dc_lock);
3373
3374                         if (ret) {
3375                                 if (aconnector->fake_enable)
3376                                         aconnector->fake_enable = false;
3377
3378                                 amdgpu_dm_update_connector_after_detect(aconnector);
3379
3380                                 drm_modeset_lock_all(dev);
3381                                 dm_restore_drm_connector_state(dev, connector);
3382                                 drm_modeset_unlock_all(dev);
3383
3384                                 drm_kms_helper_connector_hotplug_event(connector);
3385                         }
3386                 }
3387         }
3388 #ifdef CONFIG_DRM_AMD_DC_HDCP
3389         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3390                 if (adev->dm.hdcp_workqueue)
3391                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3392         }
3393 #endif
3394
3395         if (dc_link->type != dc_connection_mst_branch)
3396                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3397
3398         mutex_unlock(&aconnector->hpd_lock);
3399 }
3400
3401 static void register_hpd_handlers(struct amdgpu_device *adev)
3402 {
3403         struct drm_device *dev = adev_to_drm(adev);
3404         struct drm_connector *connector;
3405         struct amdgpu_dm_connector *aconnector;
3406         const struct dc_link *dc_link;
3407         struct dc_interrupt_params int_params = {0};
3408
3409         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3410         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3411
3412         list_for_each_entry(connector,
3413                         &dev->mode_config.connector_list, head) {
3414
3415                 aconnector = to_amdgpu_dm_connector(connector);
3416                 dc_link = aconnector->dc_link;
3417
3418                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3419                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3420                         int_params.irq_source = dc_link->irq_source_hpd;
3421
3422                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3423                                         handle_hpd_irq,
3424                                         (void *) aconnector);
3425                 }
3426
3427                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3428
3429                         /* Also register for DP short pulse (hpd_rx). */
3430                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3431                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3432
3433                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3434                                         handle_hpd_rx_irq,
3435                                         (void *) aconnector);
3436
3437                         if (adev->dm.hpd_rx_offload_wq)
3438                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3439                                         aconnector;
3440                 }
3441         }
3442 }
3443
3444 #if defined(CONFIG_DRM_AMD_DC_SI)
3445 /* Register IRQ sources and initialize IRQ callbacks */
3446 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3447 {
3448         struct dc *dc = adev->dm.dc;
3449         struct common_irq_params *c_irq_params;
3450         struct dc_interrupt_params int_params = {0};
3451         int r;
3452         int i;
3453         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3454
3455         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3456         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3457
3458         /*
3459          * Actions of amdgpu_irq_add_id():
3460          * 1. Register a set() function with base driver.
3461          *    Base driver will call set() function to enable/disable an
3462          *    interrupt in DC hardware.
3463          * 2. Register amdgpu_dm_irq_handler().
3464          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3465          *    coming from DC hardware.
3466          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3467          *    for acknowledging and handling. */
3468
3469         /* Use VBLANK interrupt */
3470         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3471                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3472                 if (r) {
3473                         DRM_ERROR("Failed to add crtc irq id!\n");
3474                         return r;
3475                 }
3476
3477                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3478                 int_params.irq_source =
3479                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3480
3481                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3482
3483                 c_irq_params->adev = adev;
3484                 c_irq_params->irq_src = int_params.irq_source;
3485
3486                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3487                                 dm_crtc_high_irq, c_irq_params);
3488         }
3489
3490         /* Use GRPH_PFLIP interrupt */
3491         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3492                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3493                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3494                 if (r) {
3495                         DRM_ERROR("Failed to add page flip irq id!\n");
3496                         return r;
3497                 }
3498
3499                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3500                 int_params.irq_source =
3501                         dc_interrupt_to_irq_source(dc, i, 0);
3502
3503                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3504
3505                 c_irq_params->adev = adev;
3506                 c_irq_params->irq_src = int_params.irq_source;
3507
3508                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3509                                 dm_pflip_high_irq, c_irq_params);
3510
3511         }
3512
3513         /* HPD */
3514         r = amdgpu_irq_add_id(adev, client_id,
3515                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3516         if (r) {
3517                 DRM_ERROR("Failed to add hpd irq id!\n");
3518                 return r;
3519         }
3520
3521         register_hpd_handlers(adev);
3522
3523         return 0;
3524 }
3525 #endif
3526
3527 /* Register IRQ sources and initialize IRQ callbacks */
3528 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3529 {
3530         struct dc *dc = adev->dm.dc;
3531         struct common_irq_params *c_irq_params;
3532         struct dc_interrupt_params int_params = {0};
3533         int r;
3534         int i;
3535         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3536
3537         if (adev->family >= AMDGPU_FAMILY_AI)
3538                 client_id = SOC15_IH_CLIENTID_DCE;
3539
3540         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3541         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3542
3543         /*
3544          * Actions of amdgpu_irq_add_id():
3545          * 1. Register a set() function with base driver.
3546          *    Base driver will call set() function to enable/disable an
3547          *    interrupt in DC hardware.
3548          * 2. Register amdgpu_dm_irq_handler().
3549          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3550          *    coming from DC hardware.
3551          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3552          *    for acknowledging and handling. */
3553
3554         /* Use VBLANK interrupt */
3555         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3556                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3557                 if (r) {
3558                         DRM_ERROR("Failed to add crtc irq id!\n");
3559                         return r;
3560                 }
3561
3562                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3563                 int_params.irq_source =
3564                         dc_interrupt_to_irq_source(dc, i, 0);
3565
3566                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3567
3568                 c_irq_params->adev = adev;
3569                 c_irq_params->irq_src = int_params.irq_source;
3570
3571                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3572                                 dm_crtc_high_irq, c_irq_params);
3573         }
3574
3575         /* Use VUPDATE interrupt */
3576         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3577                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3578                 if (r) {
3579                         DRM_ERROR("Failed to add vupdate irq id!\n");
3580                         return r;
3581                 }
3582
3583                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3584                 int_params.irq_source =
3585                         dc_interrupt_to_irq_source(dc, i, 0);
3586
3587                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3588
3589                 c_irq_params->adev = adev;
3590                 c_irq_params->irq_src = int_params.irq_source;
3591
3592                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3593                                 dm_vupdate_high_irq, c_irq_params);
3594         }
3595
3596         /* Use GRPH_PFLIP interrupt */
3597         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3598                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3599                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3600                 if (r) {
3601                         DRM_ERROR("Failed to add page flip irq id!\n");
3602                         return r;
3603                 }
3604
3605                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3606                 int_params.irq_source =
3607                         dc_interrupt_to_irq_source(dc, i, 0);
3608
3609                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3610
3611                 c_irq_params->adev = adev;
3612                 c_irq_params->irq_src = int_params.irq_source;
3613
3614                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3615                                 dm_pflip_high_irq, c_irq_params);
3616
3617         }
3618
3619         /* HPD */
3620         r = amdgpu_irq_add_id(adev, client_id,
3621                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3622         if (r) {
3623                 DRM_ERROR("Failed to add hpd irq id!\n");
3624                 return r;
3625         }
3626
3627         register_hpd_handlers(adev);
3628
3629         return 0;
3630 }
3631
3632 /* Register IRQ sources and initialize IRQ callbacks */
3633 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3634 {
3635         struct dc *dc = adev->dm.dc;
3636         struct common_irq_params *c_irq_params;
3637         struct dc_interrupt_params int_params = {0};
3638         int r;
3639         int i;
3640 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3641         static const unsigned int vrtl_int_srcid[] = {
3642                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3643                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3644                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3645                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3646                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3647                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3648         };
3649 #endif
3650
3651         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3652         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3653
3654         /*
3655          * Actions of amdgpu_irq_add_id():
3656          * 1. Register a set() function with base driver.
3657          *    Base driver will call set() function to enable/disable an
3658          *    interrupt in DC hardware.
3659          * 2. Register amdgpu_dm_irq_handler().
3660          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3661          *    coming from DC hardware.
3662          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3663          *    for acknowledging and handling.
3664          */
3665
3666         /* Use VSTARTUP interrupt */
3667         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3668                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3669                         i++) {
3670                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3671
3672                 if (r) {
3673                         DRM_ERROR("Failed to add crtc irq id!\n");
3674                         return r;
3675                 }
3676
3677                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3678                 int_params.irq_source =
3679                         dc_interrupt_to_irq_source(dc, i, 0);
3680
3681                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3682
3683                 c_irq_params->adev = adev;
3684                 c_irq_params->irq_src = int_params.irq_source;
3685
3686                 amdgpu_dm_irq_register_interrupt(
3687                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3688         }
3689
3690         /* Use otg vertical line interrupt */
3691 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3692         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3693                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3694                                 vrtl_int_srcid[i], &adev->vline0_irq);
3695
3696                 if (r) {
3697                         DRM_ERROR("Failed to add vline0 irq id!\n");
3698                         return r;
3699                 }
3700
3701                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3702                 int_params.irq_source =
3703                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3704
3705                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3706                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3707                         break;
3708                 }
3709
3710                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3711                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3712
3713                 c_irq_params->adev = adev;
3714                 c_irq_params->irq_src = int_params.irq_source;
3715
3716                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3717                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3718         }
3719 #endif
3720
3721         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3722          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3723          * to trigger at end of each vblank, regardless of state of the lock,
3724          * matching DCE behaviour.
3725          */
3726         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3727              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3728              i++) {
3729                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3730
3731                 if (r) {
3732                         DRM_ERROR("Failed to add vupdate irq id!\n");
3733                         return r;
3734                 }
3735
3736                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3737                 int_params.irq_source =
3738                         dc_interrupt_to_irq_source(dc, i, 0);
3739
3740                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3741
3742                 c_irq_params->adev = adev;
3743                 c_irq_params->irq_src = int_params.irq_source;
3744
3745                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3746                                 dm_vupdate_high_irq, c_irq_params);
3747         }
3748
3749         /* Use GRPH_PFLIP interrupt */
3750         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3751                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3752                         i++) {
3753                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3754                 if (r) {
3755                         DRM_ERROR("Failed to add page flip irq id!\n");
3756                         return r;
3757                 }
3758
3759                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3760                 int_params.irq_source =
3761                         dc_interrupt_to_irq_source(dc, i, 0);
3762
3763                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3764
3765                 c_irq_params->adev = adev;
3766                 c_irq_params->irq_src = int_params.irq_source;
3767
3768                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3769                                 dm_pflip_high_irq, c_irq_params);
3770
3771         }
3772
3773         /* HPD */
3774         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3775                         &adev->hpd_irq);
3776         if (r) {
3777                 DRM_ERROR("Failed to add hpd irq id!\n");
3778                 return r;
3779         }
3780
3781         register_hpd_handlers(adev);
3782
3783         return 0;
3784 }
3785 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3786 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3787 {
3788         struct dc *dc = adev->dm.dc;
3789         struct common_irq_params *c_irq_params;
3790         struct dc_interrupt_params int_params = {0};
3791         int r, i;
3792
3793         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3794         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3795
3796         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3797                         &adev->dmub_outbox_irq);
3798         if (r) {
3799                 DRM_ERROR("Failed to add outbox irq id!\n");
3800                 return r;
3801         }
3802
3803         if (dc->ctx->dmub_srv) {
3804                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3805                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3806                 int_params.irq_source =
3807                         dc_interrupt_to_irq_source(dc, i, 0);
3808
3809                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3810
3811                 c_irq_params->adev = adev;
3812                 c_irq_params->irq_src = int_params.irq_source;
3813
3814                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3815                                 dm_dmub_outbox1_low_irq, c_irq_params);
3816         }
3817
3818         return 0;
3819 }
3820
3821 /*
3822  * Acquires the lock for the atomic state object and returns
3823  * the new atomic state.
3824  *
3825  * This should only be called during atomic check.
3826  */
3827 int dm_atomic_get_state(struct drm_atomic_state *state,
3828                         struct dm_atomic_state **dm_state)
3829 {
3830         struct drm_device *dev = state->dev;
3831         struct amdgpu_device *adev = drm_to_adev(dev);
3832         struct amdgpu_display_manager *dm = &adev->dm;
3833         struct drm_private_state *priv_state;
3834
3835         if (*dm_state)
3836                 return 0;
3837
3838         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3839         if (IS_ERR(priv_state))
3840                 return PTR_ERR(priv_state);
3841
3842         *dm_state = to_dm_atomic_state(priv_state);
3843
3844         return 0;
3845 }
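
/*
 * A minimal usage sketch from atomic check, assuming a populated
 * drm_atomic_state "state":
 *
 *     struct dm_atomic_state *dm_state = NULL;
 *     int ret = dm_atomic_get_state(state, &dm_state);
 *
 *     if (ret)
 *             return ret;
 *     // dm_state->context can now be inspected or modified
 *
 * The NULL pre-initialization matters: dm_atomic_get_state() returns early
 * without a lookup when *dm_state has already been resolved.
 */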
3846
3847 static struct dm_atomic_state *
3848 dm_atomic_get_new_state(struct drm_atomic_state *state)
3849 {
3850         struct drm_device *dev = state->dev;
3851         struct amdgpu_device *adev = drm_to_adev(dev);
3852         struct amdgpu_display_manager *dm = &adev->dm;
3853         struct drm_private_obj *obj;
3854         struct drm_private_state *new_obj_state;
3855         int i;
3856
3857         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3858                 if (obj->funcs == dm->atomic_obj.funcs)
3859                         return to_dm_atomic_state(new_obj_state);
3860         }
3861
3862         return NULL;
3863 }
3864
3865 static struct drm_private_state *
3866 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3867 {
3868         struct dm_atomic_state *old_state, *new_state;
3869
3870         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3871         if (!new_state)
3872                 return NULL;
3873
3874         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3875
3876         old_state = to_dm_atomic_state(obj->state);
3877
3878         if (old_state && old_state->context)
3879                 new_state->context = dc_copy_state(old_state->context);
3880
3881         if (!new_state->context) {
3882                 kfree(new_state);
3883                 return NULL;
3884         }
3885
3886         return &new_state->base;
3887 }
3888
3889 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3890                                     struct drm_private_state *state)
3891 {
3892         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3893
3894         if (dm_state && dm_state->context)
3895                 dc_release_state(dm_state->context);
3896
3897         kfree(dm_state);
3898 }
3899
3900 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3901         .atomic_duplicate_state = dm_atomic_duplicate_state,
3902         .atomic_destroy_state = dm_atomic_destroy_state,
3903 };
3904
3905 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3906 {
3907         struct dm_atomic_state *state;
3908         int r;
3909
3910         adev->mode_info.mode_config_initialized = true;
3911
3912         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3913         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3914
3915         adev_to_drm(adev)->mode_config.max_width = 16384;
3916         adev_to_drm(adev)->mode_config.max_height = 16384;
3917
3918         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3919         /* disable prefer shadow for now due to hibernation issues */
3920         adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3921         /* indicates support for immediate flip */
3922         adev_to_drm(adev)->mode_config.async_page_flip = true;
3923
3924         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3925
3926         state = kzalloc(sizeof(*state), GFP_KERNEL);
3927         if (!state)
3928                 return -ENOMEM;
3929
3930         state->context = dc_create_state(adev->dm.dc);
3931         if (!state->context) {
3932                 kfree(state);
3933                 return -ENOMEM;
3934         }
3935
3936         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3937
3938         drm_atomic_private_obj_init(adev_to_drm(adev),
3939                                     &adev->dm.atomic_obj,
3940                                     &state->base,
3941                                     &dm_atomic_state_funcs);
3942
3943         r = amdgpu_display_modeset_create_props(adev);
3944         if (r) {
3945                 dc_release_state(state->context);
3946                 kfree(state);
3947                 return r;
3948         }
3949
3950         r = amdgpu_dm_audio_init(adev);
3951         if (r) {
3952                 dc_release_state(state->context);
3953                 kfree(state);
3954                 return r;
3955         }
3956
3957         return 0;
3958 }
3959
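/*
 * Fallback brightness limits (in 8-bit input-signal units) used when ACPI
 * does not provide backlight caps, and the default transition time, in
 * milliseconds, used for AUX (nits-based) brightness programming.
 */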
3960 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3961 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3962 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3963
3964 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3965                                             int bl_idx)
3966 {
3967 #if defined(CONFIG_ACPI)
3968         struct amdgpu_dm_backlight_caps caps;
3969
3970         memset(&caps, 0, sizeof(caps));
3971
3972         if (dm->backlight_caps[bl_idx].caps_valid)
3973                 return;
3974
3975         amdgpu_acpi_get_backlight_caps(&caps);
3976         if (caps.caps_valid) {
3977                 dm->backlight_caps[bl_idx].caps_valid = true;
3978                 if (caps.aux_support)
3979                         return;
3980                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3981                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3982         } else {
3983                 dm->backlight_caps[bl_idx].min_input_signal =
3984                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3985                 dm->backlight_caps[bl_idx].max_input_signal =
3986                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3987         }
3988 #else
3989         if (dm->backlight_caps[bl_idx].aux_support)
3990                 return;
3991
3992         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3993         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3994 #endif
3995 }
3996
3997 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3998                                 unsigned *min, unsigned *max)
3999 {
4000         if (!caps)
4001                 return 0;
4002
4003         if (caps->aux_support) {
4004                 // Firmware limits are in nits, DC API wants millinits.
4005                 *max = 1000 * caps->aux_max_input_signal;
4006                 *min = 1000 * caps->aux_min_input_signal;
4007         } else {
4008                 // Firmware limits are 8-bit, PWM control is 16-bit.
4009                 *max = 0x101 * caps->max_input_signal;
4010                 *min = 0x101 * caps->min_input_signal;
4011         }
4012         return 1;
4013 }
4014
4015 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4016                                         uint32_t brightness)
4017 {
4018         unsigned min, max;
4019
4020         if (!get_brightness_range(caps, &min, &max))
4021                 return brightness;
4022
4023         // Rescale 0..255 to min..max
4024         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4025                                        AMDGPU_MAX_BL_LEVEL);
4026 }
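
/*
 * Worked example with hypothetical PWM caps: for min_input_signal = 12 and
 * max_input_signal = 255, get_brightness_range() yields min = 0x101 * 12 =
 * 3084 and max = 0x101 * 255 = 65535 (multiplying by 0x101 replicates the
 * 8-bit value into both bytes, so 0xFF maps exactly onto 0xFFFF). A user
 * brightness of 128 then becomes
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */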
4027
4028 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4029                                       uint32_t brightness)
4030 {
4031         unsigned min, max;
4032
4033         if (!get_brightness_range(caps, &min, &max))
4034                 return brightness;
4035
4036         if (brightness < min)
4037                 return 0;
4038         // Rescale min..max to 0..255
4039         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4040                                  max - min);
4041 }
4042
4043 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4044                                          int bl_idx,
4045                                          u32 user_brightness)
4046 {
4047         struct amdgpu_dm_backlight_caps caps;
4048         struct dc_link *link;
4049         u32 brightness;
4050         bool rc;
4051
4052         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4053         caps = dm->backlight_caps[bl_idx];
4054
4055         dm->brightness[bl_idx] = user_brightness;
4056         /* update scratch register */
4057         if (bl_idx == 0)
4058                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4059         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4060         link = (struct dc_link *)dm->backlight_link[bl_idx];
4061
4062         /* Change brightness based on AUX property */
4063         if (caps.aux_support) {
4064                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4065                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4066                 if (!rc)
4067                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4068         } else {
4069                 rc = dc_link_set_backlight_level(link, brightness, 0);
4070                 if (!rc)
4071                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4072         }
4073
4074         if (rc)
4075                 dm->actual_brightness[bl_idx] = user_brightness;
4076 }
4077
4078 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4079 {
4080         struct amdgpu_display_manager *dm = bl_get_data(bd);
4081         int i;
4082
4083         for (i = 0; i < dm->num_of_edps; i++) {
4084                 if (bd == dm->backlight_dev[i])
4085                         break;
4086         }
4087         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4088                 i = 0;
4089         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4090
4091         return 0;
4092 }
4093
4094 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4095                                          int bl_idx)
4096 {
4097         struct amdgpu_dm_backlight_caps caps;
4098         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4099
4100         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4101         caps = dm->backlight_caps[bl_idx];
4102
4103         if (caps.aux_support) {
4104                 u32 avg, peak;
4105                 bool rc;
4106
4107                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4108                 if (!rc)
4109                         return dm->brightness[bl_idx];
4110                 return convert_brightness_to_user(&caps, avg);
4111         } else {
4112                 int ret = dc_link_get_backlight_level(link);
4113
4114                 if (ret == DC_ERROR_UNEXPECTED)
4115                         return dm->brightness[bl_idx];
4116                 return convert_brightness_to_user(&caps, ret);
4117         }
4118 }
4119
4120 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4121 {
4122         struct amdgpu_display_manager *dm = bl_get_data(bd);
4123         int i;
4124
4125         for (i = 0; i < dm->num_of_edps; i++) {
4126                 if (bd == dm->backlight_dev[i])
4127                         break;
4128         }
4129         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4130                 i = 0;
4131         return amdgpu_dm_backlight_get_level(dm, i);
4132 }
4133
4134 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4135         .options = BL_CORE_SUSPENDRESUME,
4136         .get_brightness = amdgpu_dm_backlight_get_brightness,
4137         .update_status  = amdgpu_dm_backlight_update_status,
4138 };
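
/*
 * BL_CORE_SUSPENDRESUME asks the backlight class core to handle system
 * suspend/resume itself: the core sets BL_CORE_SUSPENDED in props.state
 * and re-invokes update_status() around the transition (general
 * backlight core behavior, noted here for context).
 */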
4139
4140 static void
4141 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4142 {
4143         char bl_name[16];
4144         struct backlight_properties props = { 0 };
4145
4146         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4147         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4148
4149         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4150         props.brightness = AMDGPU_MAX_BL_LEVEL;
4151         props.type = BACKLIGHT_RAW;
4152
4153         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4154                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4155
4156         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4157                                                                        adev_to_drm(dm->adev)->dev,
4158                                                                        dm,
4159                                                                        &amdgpu_dm_backlight_ops,
4160                                                                        &props);
4161
4162         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4163                 DRM_ERROR("DM: Backlight registration failed!\n");
4164         else
4165                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4166 }
4167
4168 static int initialize_plane(struct amdgpu_display_manager *dm,
4169                             struct amdgpu_mode_info *mode_info, int plane_id,
4170                             enum drm_plane_type plane_type,
4171                             const struct dc_plane_cap *plane_cap)
4172 {
4173         struct drm_plane *plane;
4174         unsigned long possible_crtcs;
4175         int ret = 0;
4176
4177         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4178         if (!plane) {
4179                 DRM_ERROR("KMS: Failed to allocate plane\n");
4180                 return -ENOMEM;
4181         }
4182         plane->type = plane_type;
4183
4184         /*
4185          * HACK: IGT tests expect that the primary plane for a CRTC
4186          * can only have one possible CRTC. Only expose support for
4187          * any CRTC if the plane is not going to be used as a primary
4188          * plane for a CRTC - like overlay or underlay planes.
4189          */
4190         possible_crtcs = 1 << plane_id;
4191         if (plane_id >= dm->dc->caps.max_streams)
4192                 possible_crtcs = 0xff;
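        /*
         * possible_crtcs is a bitmask in which bit N selects the CRTC with
         * index N, so 1 << plane_id ties a primary plane to exactly one
         * CRTC, while 0xff exposes the plane to any of the first eight CRTCs.
         */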
4193
4194         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4195
4196         if (ret) {
4197                 DRM_ERROR("KMS: Failed to initialize plane\n");
4198                 kfree(plane);
4199                 return ret;
4200         }
4201
4202         if (mode_info)
4203                 mode_info->planes[plane_id] = plane;
4204
4205         return ret;
4206 }
4207
4208
4209 static void register_backlight_device(struct amdgpu_display_manager *dm,
4210                                       struct dc_link *link)
4211 {
4212         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4213             link->type != dc_connection_none) {
4214                 /*
4215                  * Even if registration fails, we should continue with
4216                  * DM initialization because not having backlight control
4217                  * is better than a black screen.
4218                  */
4219                 if (!dm->backlight_dev[dm->num_of_edps])
4220                         amdgpu_dm_register_backlight_device(dm);
4221
4222                 if (dm->backlight_dev[dm->num_of_edps]) {
4223                         dm->backlight_link[dm->num_of_edps] = link;
4224                         dm->num_of_edps++;
4225                 }
4226         }
4227 }
4228
4229
4230 /*
4231  * In this architecture, the association
4232  * connector -> encoder -> crtc
4233  * is not really required. The crtc and connector will hold the
4234  * display_index as an abstraction to use with the DAL component.
4235  *
4236  * Returns 0 on success
4237  */
4238 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4239 {
4240         struct amdgpu_display_manager *dm = &adev->dm;
4241         int32_t i;
4242         struct amdgpu_dm_connector *aconnector = NULL;
4243         struct amdgpu_encoder *aencoder = NULL;
4244         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4245         uint32_t link_cnt;
4246         int32_t primary_planes;
4247         enum dc_connection_type new_connection_type = dc_connection_none;
4248         const struct dc_plane_cap *plane;
4249         bool psr_feature_enabled = false;
4250
4251         dm->display_indexes_num = dm->dc->caps.max_streams;
4252         /* Update the actual number of CRTCs in use */
4253         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4254
4255         link_cnt = dm->dc->caps.max_links;
4256         if (amdgpu_dm_mode_config_init(dm->adev)) {
4257                 DRM_ERROR("DM: Failed to initialize mode config\n");
4258                 return -EINVAL;
4259         }
4260
4261         /* There is one primary plane per CRTC */
4262         primary_planes = dm->dc->caps.max_streams;
4263         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4264
4265         /*
4266          * Initialize primary planes, implicit planes for legacy IOCTLs.
4267          * Order is reversed to match iteration order in atomic check.
4268          */
4269         for (i = (primary_planes - 1); i >= 0; i--) {
4270                 plane = &dm->dc->caps.planes[i];
4271
4272                 if (initialize_plane(dm, mode_info, i,
4273                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4274                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4275                         goto fail;
4276                 }
4277         }
4278
4279         /*
4280          * Initialize overlay planes, index starting after primary planes.
4281          * These planes have a higher DRM index than the primary planes since
4282          * they should be considered as having a higher z-order.
4283          * Order is reversed to match iteration order in atomic check.
4284          *
4285          * Only support DCN for now, and only expose one so we don't encourage
4286          * userspace to use up all the pipes.
4287          */
4288         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4289                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4290
4291                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4292                         continue;
4293
4294                 if (!plane->blends_with_above || !plane->blends_with_below)
4295                         continue;
4296
4297                 if (!plane->pixel_format_support.argb8888)
4298                         continue;
4299
4300                 if (initialize_plane(dm, NULL, primary_planes + i,
4301                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4302                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4303                         goto fail;
4304                 }
4305
4306                 /* Only create one overlay plane. */
4307                 break;
4308         }
4309
4310         for (i = 0; i < dm->dc->caps.max_streams; i++)
4311                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4312                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4313                         goto fail;
4314                 }
4315
4316         /* Use Outbox interrupt */
4317         switch (adev->ip_versions[DCE_HWIP][0]) {
4318         case IP_VERSION(3, 0, 0):
4319         case IP_VERSION(3, 1, 2):
4320         case IP_VERSION(3, 1, 3):
4321         case IP_VERSION(3, 1, 4):
4322         case IP_VERSION(3, 1, 5):
4323         case IP_VERSION(3, 1, 6):
4324         case IP_VERSION(3, 2, 0):
4325         case IP_VERSION(3, 2, 1):
4326         case IP_VERSION(2, 1, 0):
4327                 if (register_outbox_irq_handlers(dm->adev)) {
4328                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4329                         goto fail;
4330                 }
4331                 break;
4332         default:
4333                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4334                               adev->ip_versions[DCE_HWIP][0]);
4335         }
4336
4337         /* Determine whether to enable PSR support by default. */
4338         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4339                 switch (adev->ip_versions[DCE_HWIP][0]) {
4340                 case IP_VERSION(3, 1, 2):
4341                 case IP_VERSION(3, 1, 3):
4342                 case IP_VERSION(3, 1, 4):
4343                 case IP_VERSION(3, 1, 5):
4344                 case IP_VERSION(3, 1, 6):
4345                 case IP_VERSION(3, 2, 0):
4346                 case IP_VERSION(3, 2, 1):
4347                         psr_feature_enabled = true;
4348                         break;
4349                 default:
4350                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4351                         break;
4352                 }
4353         }
4354
4355         /* Loop over all connectors on the board. */
4356         for (i = 0; i < link_cnt; i++) {
4357                 struct dc_link *link = NULL;
4358
4359                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4360                         DRM_ERROR(
4361                                 "KMS: Cannot support more than %d display indexes\n",
4362                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4363                         continue;
4364                 }
4365
4366                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4367                 if (!aconnector)
4368                         goto fail;
4369
4370                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4371                 if (!aencoder)
4372                         goto fail;
4373
4374                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4375                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4376                         goto fail;
4377                 }
4378
4379                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4380                         DRM_ERROR("KMS: Failed to initialize connector\n");
4381                         goto fail;
4382                 }
4383
4384                 link = dc_get_link_at_index(dm->dc, i);
4385
4386                 if (!dc_link_detect_sink(link, &new_connection_type))
4387                         DRM_ERROR("KMS: Failed to detect connector\n");
4388
4389                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4390                         emulated_link_detect(link);
4391                         amdgpu_dm_update_connector_after_detect(aconnector);
4392                 } else {
4393                         bool ret = false;
4394
4395                         mutex_lock(&dm->dc_lock);
4396                         ret = dc_link_detect(link, DETECT_REASON_BOOT);
4397                         mutex_unlock(&dm->dc_lock);
4398
4399                         if (ret) {
4400                                 amdgpu_dm_update_connector_after_detect(aconnector);
4401                                 register_backlight_device(dm, link);
4402
4403                                 if (dm->num_of_edps)
4404                                         update_connector_ext_caps(aconnector);
4405
4406                                 if (psr_feature_enabled)
4407                                         amdgpu_dm_set_psr_caps(link);
4408
4409                                 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4410                                  * PSR is also supported.
4411                                  */
4412                                 if (link->psr_settings.psr_feature_enabled)
4413                                         adev_to_drm(adev)->vblank_disable_immediate = false;
4414                         }
4415                 }
4416         }
4417
4418         /* Software is initialized. Now we can register interrupt handlers. */
4419         switch (adev->asic_type) {
4420 #if defined(CONFIG_DRM_AMD_DC_SI)
4421         case CHIP_TAHITI:
4422         case CHIP_PITCAIRN:
4423         case CHIP_VERDE:
4424         case CHIP_OLAND:
4425                 if (dce60_register_irq_handlers(dm->adev)) {
4426                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4427                         goto fail;
4428                 }
4429                 break;
4430 #endif
4431         case CHIP_BONAIRE:
4432         case CHIP_HAWAII:
4433         case CHIP_KAVERI:
4434         case CHIP_KABINI:
4435         case CHIP_MULLINS:
4436         case CHIP_TONGA:
4437         case CHIP_FIJI:
4438         case CHIP_CARRIZO:
4439         case CHIP_STONEY:
4440         case CHIP_POLARIS11:
4441         case CHIP_POLARIS10:
4442         case CHIP_POLARIS12:
4443         case CHIP_VEGAM:
4444         case CHIP_VEGA10:
4445         case CHIP_VEGA12:
4446         case CHIP_VEGA20:
4447                 if (dce110_register_irq_handlers(dm->adev)) {
4448                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4449                         goto fail;
4450                 }
4451                 break;
4452         default:
4453                 switch (adev->ip_versions[DCE_HWIP][0]) {
4454                 case IP_VERSION(1, 0, 0):
4455                 case IP_VERSION(1, 0, 1):
4456                 case IP_VERSION(2, 0, 2):
4457                 case IP_VERSION(2, 0, 3):
4458                 case IP_VERSION(2, 0, 0):
4459                 case IP_VERSION(2, 1, 0):
4460                 case IP_VERSION(3, 0, 0):
4461                 case IP_VERSION(3, 0, 2):
4462                 case IP_VERSION(3, 0, 3):
4463                 case IP_VERSION(3, 0, 1):
4464                 case IP_VERSION(3, 1, 2):
4465                 case IP_VERSION(3, 1, 3):
4466                 case IP_VERSION(3, 1, 4):
4467                 case IP_VERSION(3, 1, 5):
4468                 case IP_VERSION(3, 1, 6):
4469                 case IP_VERSION(3, 2, 0):
4470                 case IP_VERSION(3, 2, 1):
4471                         if (dcn10_register_irq_handlers(dm->adev)) {
4472                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4473                                 goto fail;
4474                         }
4475                         break;
4476                 default:
4477                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4478                                         adev->ip_versions[DCE_HWIP][0]);
4479                         goto fail;
4480                 }
4481                 break;
4482         }
4483
4484         return 0;
4485 fail:
4486         kfree(aencoder);
4487         kfree(aconnector);
4488
4489         return -EINVAL;
4490 }
4491
4492 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4493 {
4494         drm_atomic_private_obj_fini(&dm->atomic_obj);
4496 }
4497
4498 /******************************************************************************
4499  * amdgpu_display_funcs functions
4500  *****************************************************************************/
4501
4502 /*
4503  * dm_bandwidth_update - program display watermarks
4504  *
4505  * @adev: amdgpu_device pointer
4506  *
4507  * Calculate and program the display watermarks and line buffer allocation.
4508  */
4509 static void dm_bandwidth_update(struct amdgpu_device *adev)
4510 {
4511         /* TODO: implement later */
4512 }
4513
4514 static const struct amdgpu_display_funcs dm_display_funcs = {
4515         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4516         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4517         .backlight_set_level = NULL, /* never called for DC */
4518         .backlight_get_level = NULL, /* never called for DC */
4519         .hpd_sense = NULL, /* called unconditionally */
4520         .hpd_set_polarity = NULL, /* called unconditionally */
4521         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4522         .page_flip_get_scanoutpos =
4523                 dm_crtc_get_scanoutpos, /* called unconditionally */
4524         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4525         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4526 };
4527
4528 #if defined(CONFIG_DEBUG_KERNEL_DC)
4529
4530 static ssize_t s3_debug_store(struct device *device,
4531                               struct device_attribute *attr,
4532                               const char *buf,
4533                               size_t count)
4534 {
4535         int ret;
4536         int s3_state;
4537         struct drm_device *drm_dev = dev_get_drvdata(device);
4538         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4539
4540         ret = kstrtoint(buf, 0, &s3_state);
4541
4542         if (ret == 0) {
4543                 if (s3_state) {
4544                         dm_resume(adev);
4545                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4546                 } else {
4547                         dm_suspend(adev);
4548                 }
4548         }
4549
4550         return ret == 0 ? count : 0;
4551 }
4552
4553 DEVICE_ATTR_WO(s3_debug);
4554
4555 #endif
4556
4557 static int dm_early_init(void *handle)
4558 {
4559         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4560
4561         switch (adev->asic_type) {
4562 #if defined(CONFIG_DRM_AMD_DC_SI)
4563         case CHIP_TAHITI:
4564         case CHIP_PITCAIRN:
4565         case CHIP_VERDE:
4566                 adev->mode_info.num_crtc = 6;
4567                 adev->mode_info.num_hpd = 6;
4568                 adev->mode_info.num_dig = 6;
4569                 break;
4570         case CHIP_OLAND:
4571                 adev->mode_info.num_crtc = 2;
4572                 adev->mode_info.num_hpd = 2;
4573                 adev->mode_info.num_dig = 2;
4574                 break;
4575 #endif
4576         case CHIP_BONAIRE:
4577         case CHIP_HAWAII:
4578                 adev->mode_info.num_crtc = 6;
4579                 adev->mode_info.num_hpd = 6;
4580                 adev->mode_info.num_dig = 6;
4581                 break;
4582         case CHIP_KAVERI:
4583                 adev->mode_info.num_crtc = 4;
4584                 adev->mode_info.num_hpd = 6;
4585                 adev->mode_info.num_dig = 7;
4586                 break;
4587         case CHIP_KABINI:
4588         case CHIP_MULLINS:
4589                 adev->mode_info.num_crtc = 2;
4590                 adev->mode_info.num_hpd = 6;
4591                 adev->mode_info.num_dig = 6;
4592                 break;
4593         case CHIP_FIJI:
4594         case CHIP_TONGA:
4595                 adev->mode_info.num_crtc = 6;
4596                 adev->mode_info.num_hpd = 6;
4597                 adev->mode_info.num_dig = 7;
4598                 break;
4599         case CHIP_CARRIZO:
4600                 adev->mode_info.num_crtc = 3;
4601                 adev->mode_info.num_hpd = 6;
4602                 adev->mode_info.num_dig = 9;
4603                 break;
4604         case CHIP_STONEY:
4605                 adev->mode_info.num_crtc = 2;
4606                 adev->mode_info.num_hpd = 6;
4607                 adev->mode_info.num_dig = 9;
4608                 break;
4609         case CHIP_POLARIS11:
4610         case CHIP_POLARIS12:
4611                 adev->mode_info.num_crtc = 5;
4612                 adev->mode_info.num_hpd = 5;
4613                 adev->mode_info.num_dig = 5;
4614                 break;
4615         case CHIP_POLARIS10:
4616         case CHIP_VEGAM:
4617                 adev->mode_info.num_crtc = 6;
4618                 adev->mode_info.num_hpd = 6;
4619                 adev->mode_info.num_dig = 6;
4620                 break;
4621         case CHIP_VEGA10:
4622         case CHIP_VEGA12:
4623         case CHIP_VEGA20:
4624                 adev->mode_info.num_crtc = 6;
4625                 adev->mode_info.num_hpd = 6;
4626                 adev->mode_info.num_dig = 6;
4627                 break;
4628         default:
4629
4630                 switch (adev->ip_versions[DCE_HWIP][0]) {
4631                 case IP_VERSION(2, 0, 2):
4632                 case IP_VERSION(3, 0, 0):
4633                         adev->mode_info.num_crtc = 6;
4634                         adev->mode_info.num_hpd = 6;
4635                         adev->mode_info.num_dig = 6;
4636                         break;
4637                 case IP_VERSION(2, 0, 0):
4638                 case IP_VERSION(3, 0, 2):
4639                         adev->mode_info.num_crtc = 5;
4640                         adev->mode_info.num_hpd = 5;
4641                         adev->mode_info.num_dig = 5;
4642                         break;
4643                 case IP_VERSION(2, 0, 3):
4644                 case IP_VERSION(3, 0, 3):
4645                         adev->mode_info.num_crtc = 2;
4646                         adev->mode_info.num_hpd = 2;
4647                         adev->mode_info.num_dig = 2;
4648                         break;
4649                 case IP_VERSION(1, 0, 0):
4650                 case IP_VERSION(1, 0, 1):
4651                 case IP_VERSION(3, 0, 1):
4652                 case IP_VERSION(2, 1, 0):
4653                 case IP_VERSION(3, 1, 2):
4654                 case IP_VERSION(3, 1, 3):
4655                 case IP_VERSION(3, 1, 4):
4656                 case IP_VERSION(3, 1, 5):
4657                 case IP_VERSION(3, 1, 6):
4658                 case IP_VERSION(3, 2, 0):
4659                 case IP_VERSION(3, 2, 1):
4660                         adev->mode_info.num_crtc = 4;
4661                         adev->mode_info.num_hpd = 4;
4662                         adev->mode_info.num_dig = 4;
4663                         break;
4664                 default:
4665                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4666                                         adev->ip_versions[DCE_HWIP][0]);
4667                         return -EINVAL;
4668                 }
4669                 break;
4670         }
4671
4672         amdgpu_dm_set_irq_funcs(adev);
4673
4674         if (adev->mode_info.funcs == NULL)
4675                 adev->mode_info.funcs = &dm_display_funcs;
4676
4677         /*
4678          * Note: Do NOT change adev->audio_endpt_rreg and
4679          * adev->audio_endpt_wreg because they are initialised in
4680          * amdgpu_device_init()
4681          */
4682 #if defined(CONFIG_DEBUG_KERNEL_DC)
4683         device_create_file(
4684                 adev_to_drm(adev)->dev,
4685                 &dev_attr_s3_debug);
4686 #endif
4687
4688         return 0;
4689 }
4690
4691 static bool modeset_required(struct drm_crtc_state *crtc_state,
4692                              struct dc_stream_state *new_stream,
4693                              struct dc_stream_state *old_stream)
4694 {
4695         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4696 }
4697
4698 static bool modereset_required(struct drm_crtc_state *crtc_state)
4699 {
4700         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4701 }
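
/*
 * Note: modeset_required() currently ignores its stream arguments; both
 * helpers above decide purely from the DRM CRTC state (active combined
 * with drm_atomic_crtc_needs_modeset()).
 */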
4702
4703 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4704 {
4705         drm_encoder_cleanup(encoder);
4706         kfree(encoder);
4707 }
4708
4709 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4710         .destroy = amdgpu_dm_encoder_destroy,
4711 };
4712
4713
4714 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4715                                          struct drm_framebuffer *fb,
4716                                          int *min_downscale, int *max_upscale)
4717 {
4718         struct amdgpu_device *adev = drm_to_adev(dev);
4719         struct dc *dc = adev->dm.dc;
4720         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4721         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4722
4723         switch (fb->format->format) {
4724         case DRM_FORMAT_P010:
4725         case DRM_FORMAT_NV12:
4726         case DRM_FORMAT_NV21:
4727                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4728                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4729                 break;
4730
4731         case DRM_FORMAT_XRGB16161616F:
4732         case DRM_FORMAT_ARGB16161616F:
4733         case DRM_FORMAT_XBGR16161616F:
4734         case DRM_FORMAT_ABGR16161616F:
4735                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4736                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4737                 break;
4738
4739         default:
4740                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4741                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4742                 break;
4743         }
4744
4745         /*
4746          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4747          * scaling factor of 1.0 == 1000 units.
4748          */
4749         if (*max_upscale == 1)
4750                 *max_upscale = 1000;
4751
4752         if (*min_downscale == 1)
4753                 *min_downscale = 1000;
4754 }
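
/*
 * The returned factors are in thousandths: e.g. a min_downscale of 250
 * permits shrinking the source to 25% of its size (a 4:1 downscale), and
 * a max_upscale of 16000 permits a 16x stretch. These match the fallback
 * limits used in fill_dc_scaling_info() below.
 */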
4755
4756
4757 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4758                                 const struct drm_plane_state *state,
4759                                 struct dc_scaling_info *scaling_info)
4760 {
4761         int scale_w, scale_h, min_downscale, max_upscale;
4762
4763         memset(scaling_info, 0, sizeof(*scaling_info));
4764
4765         /* Source is fixed-point 16.16; we ignore the fractional part for now... */
4766         scaling_info->src_rect.x = state->src_x >> 16;
4767         scaling_info->src_rect.y = state->src_y >> 16;
4768
4769         /*
4770          * For reasons we don't (yet) fully understand, a non-zero
4771          * src_y coordinate into an NV12 buffer can cause a
4772          * system hang on DCN1x.
4773          * To avoid hangs (and maybe to be overly cautious),
4774          * let's reject both non-zero src_x and src_y.
4775          *
4776          * We currently know of only one use-case to reproduce a
4777          * scenario with non-zero src_x and src_y for NV12, which
4778          * is to gesture the YouTube Android app into full screen
4779          * on ChromeOS.
4780          */
4781         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4782             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4783             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4784             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4785                 return -EINVAL;
4786
4787         scaling_info->src_rect.width = state->src_w >> 16;
4788         if (scaling_info->src_rect.width == 0)
4789                 return -EINVAL;
4790
4791         scaling_info->src_rect.height = state->src_h >> 16;
4792         if (scaling_info->src_rect.height == 0)
4793                 return -EINVAL;
4794
4795         scaling_info->dst_rect.x = state->crtc_x;
4796         scaling_info->dst_rect.y = state->crtc_y;
4797
4798         if (state->crtc_w == 0)
4799                 return -EINVAL;
4800
4801         scaling_info->dst_rect.width = state->crtc_w;
4802
4803         if (state->crtc_h == 0)
4804                 return -EINVAL;
4805
4806         scaling_info->dst_rect.height = state->crtc_h;
4807
4808         /* DRM doesn't specify clipping on destination output. */
4809         scaling_info->clip_rect = scaling_info->dst_rect;
4810
4811         /* Validate scaling per-format with DC plane caps */
4812         if (state->plane && state->plane->dev && state->fb) {
4813                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4814                                              &min_downscale, &max_upscale);
4815         } else {
4816                 min_downscale = 250;
4817                 max_upscale = 16000;
4818         }
4819
4820         scale_w = scaling_info->dst_rect.width * 1000 /
4821                   scaling_info->src_rect.width;
4822
4823         if (scale_w < min_downscale || scale_w > max_upscale)
4824                 return -EINVAL;
4825
4826         scale_h = scaling_info->dst_rect.height * 1000 /
4827                   scaling_info->src_rect.height;
4828
4829         if (scale_h < min_downscale || scale_h > max_upscale)
4830                 return -EINVAL;
4831
4832         /*
4833          * The "scaling_quality" can be ignored for now; with quality = 0, DC
4834          * assumes reasonable defaults based on the format.
4835          */
4836
4837         return 0;
4838 }
4839
4840 static void
4841 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4842                                  uint64_t tiling_flags)
4843 {
4844         /* Fill GFX8 params */
4845         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4846                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4847
4848                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4849                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4850                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4851                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4852                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4853
4854                 /* XXX fix me for VI */
4855                 tiling_info->gfx8.num_banks = num_banks;
4856                 tiling_info->gfx8.array_mode =
4857                                 DC_ARRAY_2D_TILED_THIN1;
4858                 tiling_info->gfx8.tile_split = tile_split;
4859                 tiling_info->gfx8.bank_width = bankw;
4860                 tiling_info->gfx8.bank_height = bankh;
4861                 tiling_info->gfx8.tile_aspect = mtaspect;
4862                 tiling_info->gfx8.tile_mode =
4863                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4864         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4865                         == DC_ARRAY_1D_TILED_THIN1) {
4866                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4867         }
4868
4869         tiling_info->gfx8.pipe_config =
4870                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4871 }
4872
4873 static void
4874 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4875                                   union dc_tiling_info *tiling_info)
4876 {
4877         tiling_info->gfx9.num_pipes =
4878                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4879         tiling_info->gfx9.num_banks =
4880                 adev->gfx.config.gb_addr_config_fields.num_banks;
4881         tiling_info->gfx9.pipe_interleave =
4882                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4883         tiling_info->gfx9.num_shader_engines =
4884                 adev->gfx.config.gb_addr_config_fields.num_se;
4885         tiling_info->gfx9.max_compressed_frags =
4886                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4887         tiling_info->gfx9.num_rb_per_se =
4888                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4889         tiling_info->gfx9.shaderEnable = 1;
4890         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4891                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4892 }
4893
4894 static int
4895 validate_dcc(struct amdgpu_device *adev,
4896              const enum surface_pixel_format format,
4897              const enum dc_rotation_angle rotation,
4898              const union dc_tiling_info *tiling_info,
4899              const struct dc_plane_dcc_param *dcc,
4900              const struct dc_plane_address *address,
4901              const struct plane_size *plane_size)
4902 {
4903         struct dc *dc = adev->dm.dc;
4904         struct dc_dcc_surface_param input;
4905         struct dc_surface_dcc_cap output;
4906
4907         memset(&input, 0, sizeof(input));
4908         memset(&output, 0, sizeof(output));
4909
4910         if (!dcc->enable)
4911                 return 0;
4912
4913         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4914             !dc->cap_funcs.get_dcc_compression_cap)
4915                 return -EINVAL;
4916
4917         input.format = format;
4918         input.surface_size.width = plane_size->surface_size.width;
4919         input.surface_size.height = plane_size->surface_size.height;
4920         input.swizzle_mode = tiling_info->gfx9.swizzle;
4921
4922         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4923                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4924         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4925                 input.scan = SCAN_DIRECTION_VERTICAL;
4926
4927         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4928                 return -EINVAL;
4929
4930         if (!output.capable)
4931                 return -EINVAL;
4932
4933         if (dcc->independent_64b_blks == 0 &&
4934             output.grph.rgb.independent_64b_blks != 0)
4935                 return -EINVAL;
4936
4937         return 0;
4938 }
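
/*
 * In short: validate_dcc() asks DC whether the surface can be DCC
 * compressed at all, then rejects the combination when the hardware
 * reports that independent 64B blocks are required but the modifier
 * did not request them (dcc->independent_64b_blks == 0).
 */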
4939
4940 static bool
4941 modifier_has_dcc(uint64_t modifier)
4942 {
4943         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4944 }
4945
4946 static unsigned
4947 modifier_gfx9_swizzle_mode(uint64_t modifier)
4948 {
4949         if (modifier == DRM_FORMAT_MOD_LINEAR)
4950                 return 0;
4951
4952         return AMD_FMT_MOD_GET(TILE, modifier);
4953 }
4954
4955 static const struct drm_format_info *
4956 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4957 {
4958         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4959 }
4960
4961 static void
4962 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4963                                     union dc_tiling_info *tiling_info,
4964                                     uint64_t modifier)
4965 {
4966         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4967         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4968         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4969         unsigned int pipes_log2;
4970
4971         pipes_log2 = min(5u, mod_pipe_xor_bits);
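        /* The min() above caps pipes_log2 at 5, i.e. at most 2^5 = 32 pipes. */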
4972
4973         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4974
4975         if (!IS_AMD_FMT_MOD(modifier))
4976                 return;
4977
4978         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4979         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4980
4981         if (adev->family >= AMDGPU_FAMILY_NV) {
4982                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4983         } else {
4984                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4985
4986                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4987         }
4988 }
4989
4990 enum dm_micro_swizzle {
4991         MICRO_SWIZZLE_Z = 0,
4992         MICRO_SWIZZLE_S = 1,
4993         MICRO_SWIZZLE_D = 2,
4994         MICRO_SWIZZLE_R = 3
4995 };
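
/*
 * These values mirror the low two bits of the GFX9+ swizzle modes (the
 * Z/S/D/R micro-tile ordering), which is why the caller below masks the
 * swizzle mode with "& 3" to recover the micro swizzle.
 */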
4996
4997 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4998                                           uint32_t format,
4999                                           uint64_t modifier)
5000 {
5001         struct amdgpu_device *adev = drm_to_adev(plane->dev);
5002         const struct drm_format_info *info = drm_format_info(format);
5003         int i;
5004
5005         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
5006
5007         if (!info)
5008                 return false;
5009
5010         /*
5011          * We always have to allow these modifiers:
5012          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
5013          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
5014          */
5015         if (modifier == DRM_FORMAT_MOD_LINEAR ||
5016             modifier == DRM_FORMAT_MOD_INVALID) {
5017                 return true;
5018         }
5019
5020         /* Check that the modifier is on the list of the plane's supported modifiers. */
5021         for (i = 0; i < plane->modifier_count; i++) {
5022                 if (modifier == plane->modifiers[i])
5023                         break;
5024         }
5025         if (i == plane->modifier_count)
5026                 return false;
5027
5028         /*
5029          * For D swizzle the canonical modifier depends on the bpp, so check
5030          * it here.
5031          */
5032         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
5033             adev->family >= AMDGPU_FAMILY_NV) {
5034                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
5035                         return false;
5036         }
5037
5038         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
5039             info->cpp[0] < 8)
5040                 return false;
5041
5042         if (modifier_has_dcc(modifier)) {
5043                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
5044                 if (info->cpp[0] != 4)
5045                         return false;
5046                 /* We support multi-planar formats, but not when combined with
5047                  * additional DCC metadata planes. */
5048                 if (info->num_planes > 1)
5049                         return false;
5050         }
5051
5052         return true;
5053 }
5054
5055 static void
5056 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
5057 {
5058         if (!*mods)
5059                 return;
5060
5061         if (*cap - *size < 1) {
5062                 uint64_t new_cap = *cap * 2;
5063                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
5064
5065                 if (!new_mods) {
5066                         kfree(*mods);
5067                         *mods = NULL;
5068                         return;
5069                 }
5070
5071                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
5072                 kfree(*mods);
5073                 *mods = new_mods;
5074                 *cap = new_cap;
5075         }
5076
5077         (*mods)[*size] = mod;
5078         *size += 1;
5079 }
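
/*
 * add_modifier() grows the list geometrically (doubling the capacity) and
 * degrades gracefully on allocation failure: it frees the list and leaves
 * *mods NULL, which subsequent calls treat as a no-op, so callers only
 * need a single NULL check once the list is fully built.
 */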
5080
5081 static void
5082 add_gfx9_modifiers(const struct amdgpu_device *adev,
5083                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
5084 {
5085         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5086         int pipe_xor_bits = min(8, pipes +
5087                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5088         int bank_xor_bits = min(8 - pipe_xor_bits,
5089                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5090         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5091                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5092
5093
5094         if (adev->family == AMDGPU_FAMILY_RV) {
5095                 /* Raven2 and later */
5096                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5097
5098                 /*
5099                  * No _D DCC swizzles yet because we only allow 32bpp, which
5100                  * doesn't support _D on DCN
5101                  */
5102
5103                 if (has_constant_encode) {
5104                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5105                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5106                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5107                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5108                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5109                                     AMD_FMT_MOD_SET(DCC, 1) |
5110                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5111                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5112                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5113                 }
5114
5115                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5116                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5117                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5118                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5119                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5120                             AMD_FMT_MOD_SET(DCC, 1) |
5121                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5122                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5123                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5124
5125                 if (has_constant_encode) {
5126                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5127                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5128                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5129                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5130                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5131                                     AMD_FMT_MOD_SET(DCC, 1) |
5132                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5133                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5134                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5135
5136                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5137                                     AMD_FMT_MOD_SET(RB, rb) |
5138                                     AMD_FMT_MOD_SET(PIPE, pipes));
5139                 }
5140
5141                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5142                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5143                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5144                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5145                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5146                             AMD_FMT_MOD_SET(DCC, 1) |
5147                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5148                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5149                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5150                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5151                             AMD_FMT_MOD_SET(RB, rb) |
5152                             AMD_FMT_MOD_SET(PIPE, pipes));
5153         }
5154
5155         /*
5156          * Only supported for 64bpp on Raven, will be filtered on format in
5157          * dm_plane_format_mod_supported.
5158          */
5159         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5160                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5161                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5162                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5163                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5164
5165         if (adev->family == AMDGPU_FAMILY_RV) {
5166                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5167                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5168                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5169                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5170                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5171         }
5172
5173         /*
5174          * Only supported for 64bpp on Raven, will be filtered on format in
5175          * dm_plane_format_mod_supported.
5176          */
5177         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5178                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5179                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5180
5181         if (adev->family == AMDGPU_FAMILY_RV) {
5182                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5183                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5184                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5185         }
5186 }
5187
5188 static void
5189 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5190                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5191 {
5192         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5193
5194         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5195                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5196                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5197                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5198                     AMD_FMT_MOD_SET(DCC, 1) |
5199                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5200                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5201                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5202
5203         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5204                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5205                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5206                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5207                     AMD_FMT_MOD_SET(DCC, 1) |
5208                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5209                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5210                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5211                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5212
5213         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5214                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5215                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5216                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5217
5218         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5219                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5220                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5221                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5222
5223
5224         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5225         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5226                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5227                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5228
5229         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5230                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5231                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5232 }
5233
5234 static void
5235 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5236                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5237 {
5238         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5239         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5240
5241         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5242                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5243                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5244                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5245                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5246                     AMD_FMT_MOD_SET(DCC, 1) |
5247                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5248                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5249                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5250                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5251
5252         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5253                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5254                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5255                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5256                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5257                     AMD_FMT_MOD_SET(DCC, 1) |
5258                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5259                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5260                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5261
5262         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5263                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5264                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5265                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5266                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5267                     AMD_FMT_MOD_SET(DCC, 1) |
5268                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5269                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5270                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5271                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5272                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5273
5274         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5275                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5276                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5277                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5278                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5279                     AMD_FMT_MOD_SET(DCC, 1) |
5280                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5281                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5282                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5283                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5284
5285         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5286                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5287                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5288                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5289                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5290
5291         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5292                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5293                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5294                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5295                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5296
5297         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5298         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5299                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5300                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5301
5302         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5303                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5304                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5305 }
5306
5307 static void
5308 add_gfx11_modifiers(struct amdgpu_device *adev,
5309                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5310 {
5311         int num_pipes = 0;
5312         int pipe_xor_bits = 0;
5313         int num_pkrs = 0;
5314         int pkrs = 0;
5315         u32 gb_addr_config;
5316         u8 i = 0;
5317         unsigned swizzle_r_x;
5318         uint64_t modifier_r_x;
5319         uint64_t modifier_dcc_best;
5320         uint64_t modifier_dcc_4k;
5321
5322         /* TODO: GFX11 IP HW init hasn't finished, so we would read zero from
5323          * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
5324         gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5325         ASSERT(gb_addr_config != 0);
5326
5327         num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5328         pkrs = ilog2(num_pkrs);
5329         num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5330         pipe_xor_bits = ilog2(num_pipes);
5331
5332         for (i = 0; i < 2; i++) {
5333                 /* Insert the best one first. */
5334                 /* R_X swizzle modes are the best for rendering and DCC requires them. */
5335                 if (num_pipes > 16)
5336                         swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5337                 else
5338                         swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
5339
5340                 modifier_r_x = AMD_FMT_MOD |
5341                                AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5342                                AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5343                                AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5344                                AMD_FMT_MOD_SET(PACKERS, pkrs);
5345
5346                 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5347                 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5348                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5349                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5350                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5351
5352                 /* DCC settings for 4K and greater resolutions. (required by display hw) */
5353                 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5354                                   AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5355                                   AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5356                                   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5357
5358                 add_modifier(mods, size, capacity, modifier_dcc_best);
5359                 add_modifier(mods, size, capacity, modifier_dcc_4k);
5360
5361                 add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5362                 add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5363
5364                 add_modifier(mods, size, capacity, modifier_r_x);
5365         }
5366
5367         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5368                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5369                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5370 }
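
/*
 * Example (illustrative): with num_pipes <= 16 the loop above emits the
 * GFX9_64K_R_X family first (DCC "best", DCC 4K, their DCC_RETILE variants,
 * then the plain R_X swizzle) followed by the GFX11_256K_R_X family; with
 * num_pipes > 16 the 256K swizzles come first. The list is ordered
 * best-first because userspace is expected to prefer earlier entries.
 */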
5371
5372 static int
5373 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5374 {
5375         uint64_t size = 0, capacity = 128;
5376         *mods = NULL;
5377
5378         /* We have not hooked up any pre-GFX9 modifiers. */
5379         if (adev->family < AMDGPU_FAMILY_AI)
5380                 return 0;
5381
5382         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5383
5384         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5385                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5386                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5387                 return *mods ? 0 : -ENOMEM;
5388         }
5389
5390         switch (adev->family) {
5391         case AMDGPU_FAMILY_AI:
5392         case AMDGPU_FAMILY_RV:
5393                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5394                 break;
5395         case AMDGPU_FAMILY_NV:
5396         case AMDGPU_FAMILY_VGH:
5397         case AMDGPU_FAMILY_YC:
5398         case AMDGPU_FAMILY_GC_10_3_6:
5399         case AMDGPU_FAMILY_GC_10_3_7:
5400                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5401                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5402                 else
5403                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5404                 break;
5405         case AMDGPU_FAMILY_GC_11_0_0:
5406         case AMDGPU_FAMILY_GC_11_0_2:
5407                 add_gfx11_modifiers(adev, mods, &size, &capacity);
5408                 break;
5409         }
5410
5411         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5412
5413         /* INVALID marks the end of the list. */
5414         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5415
5416         if (!*mods)
5417                 return -ENOMEM;
5418
5419         return 0;
5420 }
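
/*
 * Example (illustrative sketch): the array returned through @mods is
 * terminated by DRM_FORMAT_MOD_INVALID, matching what
 * drm_universal_plane_init() expects for its format_modifiers argument,
 * so a caller can count the advertised modifiers without a separate size:
 *
 *	uint64_t *mods;
 *
 *	if (!get_plane_modifiers(adev, DRM_PLANE_TYPE_PRIMARY, &mods) && mods) {
 *		int num;
 *
 *		for (num = 0; mods[num] != DRM_FORMAT_MOD_INVALID; num++)
 *			;
 *		kfree(mods);
 *	}
 */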
5421
5422 static int
5423 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5424                                           const struct amdgpu_framebuffer *afb,
5425                                           const enum surface_pixel_format format,
5426                                           const enum dc_rotation_angle rotation,
5427                                           const struct plane_size *plane_size,
5428                                           union dc_tiling_info *tiling_info,
5429                                           struct dc_plane_dcc_param *dcc,
5430                                           struct dc_plane_address *address,
5431                                           const bool force_disable_dcc)
5432 {
5433         const uint64_t modifier = afb->base.modifier;
5434         int ret = 0;
5435
5436         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5437         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5438
5439         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5440                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5441                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5442                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5443
5444                 dcc->enable = 1;
5445                 dcc->meta_pitch = afb->base.pitches[1];
5446                 dcc->independent_64b_blks = independent_64b_blks;
5447                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5448                         if (independent_64b_blks && independent_128b_blks)
5449                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5450                         else if (independent_128b_blks)
5451                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5452                         else if (independent_64b_blks && !independent_128b_blks)
5453                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5454                         else
5455                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5456                 } else {
5457                         if (independent_64b_blks)
5458                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5459                         else
5460                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5461                 }
5462
5463                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5464                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5465         }
5466
5467         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5468         if (ret)
5469                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5470
5471         return ret;
5472 }
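
/*
 * Note (illustrative): on GFX10_RBPLUS and newer tile versions the two DCC
 * independence bits map as follows in the helper above: 64B + 128B selects
 * hubp_ind_block_64b_no_128bcl, 128B alone selects hubp_ind_block_128b,
 * 64B alone selects hubp_ind_block_64b, and neither bit leaves the choice
 * unconstrained. Older tile versions only distinguish 64B-independent from
 * unconstrained.
 */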
5473
5474 static int
5475 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5476                              const struct amdgpu_framebuffer *afb,
5477                              const enum surface_pixel_format format,
5478                              const enum dc_rotation_angle rotation,
5479                              const uint64_t tiling_flags,
5480                              union dc_tiling_info *tiling_info,
5481                              struct plane_size *plane_size,
5482                              struct dc_plane_dcc_param *dcc,
5483                              struct dc_plane_address *address,
5484                              bool tmz_surface,
5485                              bool force_disable_dcc)
5486 {
5487         const struct drm_framebuffer *fb = &afb->base;
5488         int ret;
5489
5490         memset(tiling_info, 0, sizeof(*tiling_info));
5491         memset(plane_size, 0, sizeof(*plane_size));
5492         memset(dcc, 0, sizeof(*dcc));
5493         memset(address, 0, sizeof(*address));
5494
5495         address->tmz_surface = tmz_surface;
5496
5497         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5498                 uint64_t addr = afb->address + fb->offsets[0];
5499
5500                 plane_size->surface_size.x = 0;
5501                 plane_size->surface_size.y = 0;
5502                 plane_size->surface_size.width = fb->width;
5503                 plane_size->surface_size.height = fb->height;
5504                 plane_size->surface_pitch =
5505                         fb->pitches[0] / fb->format->cpp[0];
5506
5507                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5508                 address->grph.addr.low_part = lower_32_bits(addr);
5509                 address->grph.addr.high_part = upper_32_bits(addr);
5510         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5511                 uint64_t luma_addr = afb->address + fb->offsets[0];
5512                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5513
5514                 plane_size->surface_size.x = 0;
5515                 plane_size->surface_size.y = 0;
5516                 plane_size->surface_size.width = fb->width;
5517                 plane_size->surface_size.height = fb->height;
5518                 plane_size->surface_pitch =
5519                         fb->pitches[0] / fb->format->cpp[0];
5520
5521                 plane_size->chroma_size.x = 0;
5522                 plane_size->chroma_size.y = 0;
5523                 /* TODO: set these based on surface format */
5524                 plane_size->chroma_size.width = fb->width / 2;
5525                 plane_size->chroma_size.height = fb->height / 2;
5526
5527                 plane_size->chroma_pitch =
5528                         fb->pitches[1] / fb->format->cpp[1];
5529
5530                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5531                 address->video_progressive.luma_addr.low_part =
5532                         lower_32_bits(luma_addr);
5533                 address->video_progressive.luma_addr.high_part =
5534                         upper_32_bits(luma_addr);
5535                 address->video_progressive.chroma_addr.low_part =
5536                         lower_32_bits(chroma_addr);
5537                 address->video_progressive.chroma_addr.high_part =
5538                         upper_32_bits(chroma_addr);
5539         }
5540
5541         if (adev->family >= AMDGPU_FAMILY_AI) {
5542                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5543                                                                 rotation, plane_size,
5544                                                                 tiling_info, dcc,
5545                                                                 address,
5546                                                                 force_disable_dcc);
5547                 if (ret)
5548                         return ret;
5549         } else {
5550                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5551         }
5552
5553         return 0;
5554 }
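
/*
 * Example (illustrative, assuming a linear 1920x1080 NV12 buffer): plane 0
 * is 8-bit luma (cpp[0] = 1) and plane 1 is interleaved CbCr (cpp[1] = 2),
 * so with pitches[0] = pitches[1] = 1920 the code above computes
 * surface_pitch = 1920, chroma_size = 960x540 and chroma_pitch = 960,
 * matching the 2x2 chroma subsampling currently hardcoded for video
 * formats.
 */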
5555
5556 static void
5557 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5558                                bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5559                                bool *global_alpha, int *global_alpha_value)
5560 {
5561         *per_pixel_alpha = false;
5562         *pre_multiplied_alpha = true;
5563         *global_alpha = false;
5564         *global_alpha_value = 0xff;
5565
5566         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5567                 return;
5568
5569         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5570                 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5571                 static const uint32_t alpha_formats[] = {
5572                         DRM_FORMAT_ARGB8888,
5573                         DRM_FORMAT_RGBA8888,
5574                         DRM_FORMAT_ABGR8888,
5575                 };
5576                 uint32_t format = plane_state->fb->format->format;
5577                 unsigned int i;
5578
5579                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5580                         if (format == alpha_formats[i]) {
5581                                 *per_pixel_alpha = true;
5582                                 break;
5583                         }
5584                 }
5585
5586                 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5587                         *pre_multiplied_alpha = false;
5588         }
5589
5590         if (plane_state->alpha < 0xffff) {
5591                 *global_alpha = true;
5592                 *global_alpha_value = plane_state->alpha >> 8;
5593         }
5594 }
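
/*
 * Example (illustrative): the DRM "alpha" plane property is 16-bit, with
 * 0xffff meaning fully opaque. An overlay set to 50% (0x8000) takes the
 * plane_state->alpha < 0xffff branch above and programs
 * global_alpha_value = 0x8000 >> 8 = 0x80 in DC's 8-bit range.
 */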
5595
5596 static int
5597 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5598                             const enum surface_pixel_format format,
5599                             enum dc_color_space *color_space)
5600 {
5601         bool full_range;
5602
5603         *color_space = COLOR_SPACE_SRGB;
5604
5605         /* DRM color properties only affect non-RGB formats. */
5606         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5607                 return 0;
5608
5609         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5610
5611         switch (plane_state->color_encoding) {
5612         case DRM_COLOR_YCBCR_BT601:
5613                 if (full_range)
5614                         *color_space = COLOR_SPACE_YCBCR601;
5615                 else
5616                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5617                 break;
5618
5619         case DRM_COLOR_YCBCR_BT709:
5620                 if (full_range)
5621                         *color_space = COLOR_SPACE_YCBCR709;
5622                 else
5623                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5624                 break;
5625
5626         case DRM_COLOR_YCBCR_BT2020:
5627                 if (full_range)
5628                         *color_space = COLOR_SPACE_2020_YCBCR;
5629                 else
5630                         return -EINVAL;
5631                 break;
5632
5633         default:
5634                 return -EINVAL;
5635         }
5636
5637         return 0;
5638 }
5639
5640 static int
5641 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5642                             const struct drm_plane_state *plane_state,
5643                             const uint64_t tiling_flags,
5644                             struct dc_plane_info *plane_info,
5645                             struct dc_plane_address *address,
5646                             bool tmz_surface,
5647                             bool force_disable_dcc)
5648 {
5649         const struct drm_framebuffer *fb = plane_state->fb;
5650         const struct amdgpu_framebuffer *afb =
5651                 to_amdgpu_framebuffer(plane_state->fb);
5652         int ret;
5653
5654         memset(plane_info, 0, sizeof(*plane_info));
5655
5656         switch (fb->format->format) {
5657         case DRM_FORMAT_C8:
5658                 plane_info->format =
5659                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5660                 break;
5661         case DRM_FORMAT_RGB565:
5662                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5663                 break;
5664         case DRM_FORMAT_XRGB8888:
5665         case DRM_FORMAT_ARGB8888:
5666                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5667                 break;
5668         case DRM_FORMAT_XRGB2101010:
5669         case DRM_FORMAT_ARGB2101010:
5670                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5671                 break;
5672         case DRM_FORMAT_XBGR2101010:
5673         case DRM_FORMAT_ABGR2101010:
5674                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5675                 break;
5676         case DRM_FORMAT_XBGR8888:
5677         case DRM_FORMAT_ABGR8888:
5678                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5679                 break;
5680         case DRM_FORMAT_NV21:
5681                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5682                 break;
5683         case DRM_FORMAT_NV12:
5684                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5685                 break;
5686         case DRM_FORMAT_P010:
5687                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5688                 break;
5689         case DRM_FORMAT_XRGB16161616F:
5690         case DRM_FORMAT_ARGB16161616F:
5691                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5692                 break;
5693         case DRM_FORMAT_XBGR16161616F:
5694         case DRM_FORMAT_ABGR16161616F:
5695                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5696                 break;
5697         case DRM_FORMAT_XRGB16161616:
5698         case DRM_FORMAT_ARGB16161616:
5699                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5700                 break;
5701         case DRM_FORMAT_XBGR16161616:
5702         case DRM_FORMAT_ABGR16161616:
5703                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5704                 break;
5705         default:
5706                 DRM_ERROR(
5707                         "Unsupported screen format %p4cc\n",
5708                         &fb->format->format);
5709                 return -EINVAL;
5710         }
5711
5712         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5713         case DRM_MODE_ROTATE_0:
5714                 plane_info->rotation = ROTATION_ANGLE_0;
5715                 break;
5716         case DRM_MODE_ROTATE_90:
5717                 plane_info->rotation = ROTATION_ANGLE_90;
5718                 break;
5719         case DRM_MODE_ROTATE_180:
5720                 plane_info->rotation = ROTATION_ANGLE_180;
5721                 break;
5722         case DRM_MODE_ROTATE_270:
5723                 plane_info->rotation = ROTATION_ANGLE_270;
5724                 break;
5725         default:
5726                 plane_info->rotation = ROTATION_ANGLE_0;
5727                 break;
5728         }
5729
5730         plane_info->visible = true;
5731         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5732
5733         plane_info->layer_index = 0;
5734
5735         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5736                                           &plane_info->color_space);
5737         if (ret)
5738                 return ret;
5739
5740         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5741                                            plane_info->rotation, tiling_flags,
5742                                            &plane_info->tiling_info,
5743                                            &plane_info->plane_size,
5744                                            &plane_info->dcc, address, tmz_surface,
5745                                            force_disable_dcc);
5746         if (ret)
5747                 return ret;
5748
5749         fill_blending_from_plane_state(
5750                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5751                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5752
5753         return 0;
5754 }
5755
5756 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5757                                     struct dc_plane_state *dc_plane_state,
5758                                     struct drm_plane_state *plane_state,
5759                                     struct drm_crtc_state *crtc_state)
5760 {
5761         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5762         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5763         struct dc_scaling_info scaling_info;
5764         struct dc_plane_info plane_info;
5765         int ret;
5766         bool force_disable_dcc = false;
5767
5768         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5769         if (ret)
5770                 return ret;
5771
5772         dc_plane_state->src_rect = scaling_info.src_rect;
5773         dc_plane_state->dst_rect = scaling_info.dst_rect;
5774         dc_plane_state->clip_rect = scaling_info.clip_rect;
5775         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5776
5777         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5778         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5779                                           afb->tiling_flags,
5780                                           &plane_info,
5781                                           &dc_plane_state->address,
5782                                           afb->tmz_surface,
5783                                           force_disable_dcc);
5784         if (ret)
5785                 return ret;
5786
5787         dc_plane_state->format = plane_info.format;
5788         dc_plane_state->color_space = plane_info.color_space;
5790         dc_plane_state->plane_size = plane_info.plane_size;
5791         dc_plane_state->rotation = plane_info.rotation;
5792         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5793         dc_plane_state->stereo_format = plane_info.stereo_format;
5794         dc_plane_state->tiling_info = plane_info.tiling_info;
5795         dc_plane_state->visible = plane_info.visible;
5796         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5797         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5798         dc_plane_state->global_alpha = plane_info.global_alpha;
5799         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5800         dc_plane_state->dcc = plane_info.dcc;
5801         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5802         dc_plane_state->flip_int_enabled = true;
5803
5804         /*
5805          * Always set input transfer function, since plane state is refreshed
5806          * every time.
5807          */
5808         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5809         if (ret)
5810                 return ret;
5811
5812         return 0;
5813 }
5814
5815 /**
5816  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5817  *
5818  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5819  *         remote fb
5820  * @old_plane_state: Old state of @plane
5821  * @new_plane_state: New state of @plane
5822  * @crtc_state: New state of CRTC connected to the @plane
5823  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5824  *
5825  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5826  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5827  * the eDP remote buffer. The responsibility of specifying the dirty regions is
5828  * amdgpu_dm's.
5829  *
5830  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5831  * plane with regions that require flushing to the eDP remote buffer. In
5832  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5833  * implicitly provide damage clips without any client support via the plane
5834  * bounds.
5835  *
5836  * Today, amdgpu_dm only supports the MPO and cursor use cases.
5837  *
5838  * TODO: Also enable for FB_DAMAGE_CLIPS
5839  */
5840 static void fill_dc_dirty_rects(struct drm_plane *plane,
5841                                 struct drm_plane_state *old_plane_state,
5842                                 struct drm_plane_state *new_plane_state,
5843                                 struct drm_crtc_state *crtc_state,
5844                                 struct dc_flip_addrs *flip_addrs)
5845 {
5846         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5847         struct rect *dirty_rects = flip_addrs->dirty_rects;
5848         uint32_t num_clips;
5849         bool bb_changed;
5850         bool fb_changed;
5851         uint32_t i = 0;
5852
5853         flip_addrs->dirty_rect_count = 0;
5854
5855         /*
5856          * Cursor plane has its own dirty rect update interface. See
5857          * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5858          */
5859         if (plane->type == DRM_PLANE_TYPE_CURSOR)
5860                 return;
5861
5862         /*
5863          * Today, we only consider the MPO use case for PSR SU. If MPO is not
5864          * requested and there is a plane update, do a full-frame update (FFU).
5865          */
5866         if (!dm_crtc_state->mpo_requested) {
5867                 dirty_rects[0].x = 0;
5868                 dirty_rects[0].y = 0;
5869                 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5870                 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5871                 flip_addrs->dirty_rect_count = 1;
5872                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5873                                  new_plane_state->plane->base.id,
5874                                  dm_crtc_state->base.mode.crtc_hdisplay,
5875                                  dm_crtc_state->base.mode.crtc_vdisplay);
5876                 return;
5877         }
5878
5879         /*
5880          * MPO is requested. Add entire plane bounding box to dirty rects if
5881          * flipped to or damaged.
5882          *
5883          * If plane is moved or resized, also add old bounding box to dirty
5884          * rects.
5885          */
5886         num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5887         fb_changed = old_plane_state->fb->base.id !=
5888                      new_plane_state->fb->base.id;
5889         bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5890                       old_plane_state->crtc_y != new_plane_state->crtc_y ||
5891                       old_plane_state->crtc_w != new_plane_state->crtc_w ||
5892                       old_plane_state->crtc_h != new_plane_state->crtc_h);
5893
5894         DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5895                          new_plane_state->plane->base.id,
5896                          bb_changed, fb_changed, num_clips);
5897
5898         if (num_clips || fb_changed || bb_changed) {
5899                 dirty_rects[i].x = new_plane_state->crtc_x;
5900                 dirty_rects[i].y = new_plane_state->crtc_y;
5901                 dirty_rects[i].width = new_plane_state->crtc_w;
5902                 dirty_rects[i].height = new_plane_state->crtc_h;
5903                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5904                                  new_plane_state->plane->base.id,
5905                                  dirty_rects[i].x, dirty_rects[i].y,
5906                                  dirty_rects[i].width, dirty_rects[i].height);
5907                 i += 1;
5908         }
5909
5910         /* Add old plane bounding-box if plane is moved or resized */
5911         if (bb_changed) {
5912                 dirty_rects[i].x = old_plane_state->crtc_x;
5913                 dirty_rects[i].y = old_plane_state->crtc_y;
5914                 dirty_rects[i].width = old_plane_state->crtc_w;
5915                 dirty_rects[i].height = old_plane_state->crtc_h;
5916                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5917                                 old_plane_state->plane->base.id,
5918                                 dirty_rects[i].x, dirty_rects[i].y,
5919                                 dirty_rects[i].width, dirty_rects[i].height);
5920                 i += 1;
5921         }
5922
5923         flip_addrs->dirty_rect_count = i;
5924 }
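
/*
 * Example (illustrative): an MPO plane of 256x256 moved from (0, 0) to
 * (64, 64) sets bb_changed above and produces two dirty rects: the new
 * bounding box at (64, 64) and the old one at (0, 0), i.e.
 * dirty_rect_count = 2. Without MPO requested, the same flip would take
 * the FFU path and report a single full-screen rect instead.
 */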
5925
5926 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5927                                            const struct dm_connector_state *dm_state,
5928                                            struct dc_stream_state *stream)
5929 {
5930         enum amdgpu_rmx_type rmx_type;
5931
5932         struct rect src = { 0 }; /* viewport in composition space */
5933         struct rect dst = { 0 }; /* stream addressable area */
5934
5935         /* no mode. nothing to be done */
5936         if (!mode)
5937                 return;
5938
5939         /* Full screen scaling by default */
5940         src.width = mode->hdisplay;
5941         src.height = mode->vdisplay;
5942         dst.width = stream->timing.h_addressable;
5943         dst.height = stream->timing.v_addressable;
5944
5945         if (dm_state) {
5946                 rmx_type = dm_state->scaling;
5947                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5948                         if (src.width * dst.height <
5949                                         src.height * dst.width) {
5950                                 /* height needs less upscaling/more downscaling */
5951                                 dst.width = src.width *
5952                                                 dst.height / src.height;
5953                         } else {
5954                                 /* width needs less upscaling/more downscaling */
5955                                 dst.height = src.height *
5956                                                 dst.width / src.width;
5957                         }
5958                 } else if (rmx_type == RMX_CENTER) {
5959                         dst = src;
5960                 }
5961
5962                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5963                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5964
5965                 if (dm_state->underscan_enable) {
5966                         dst.x += dm_state->underscan_hborder / 2;
5967                         dst.y += dm_state->underscan_vborder / 2;
5968                         dst.width -= dm_state->underscan_hborder;
5969                         dst.height -= dm_state->underscan_vborder;
5970                 }
5971         }
5972
5973         stream->src = src;
5974         stream->dst = dst;
5975
5976         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5977                       dst.x, dst.y, dst.width, dst.height);
5978
5979 }
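
/*
 * Example (illustrative): scaling a 1280x1024 (5:4) source onto a
 * 1920x1080 timing with RMX_ASPECT: since 1280 * 1080 < 1024 * 1920, the
 * width branch is taken and dst.width = 1280 * 1080 / 1024 = 1350. The
 * result is a centered 1350x1080 destination rect with
 * dst.x = (1920 - 1350) / 2 = 285 and dst.y = 0.
 */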
5980
5981 static enum dc_color_depth
5982 convert_color_depth_from_display_info(const struct drm_connector *connector,
5983                                       bool is_y420, int requested_bpc)
5984 {
5985         uint8_t bpc;
5986
5987         if (is_y420) {
5988                 bpc = 8;
5989
5990                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5991                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5992                         bpc = 16;
5993                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5994                         bpc = 12;
5995                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5996                         bpc = 10;
5997         } else {
5998                 bpc = (uint8_t)connector->display_info.bpc;
5999                 /* Assume 8 bpc by default if no bpc is specified. */
6000                 bpc = bpc ? bpc : 8;
6001         }
6002
6003         if (requested_bpc > 0) {
6004                 /*
6005                  * Cap display bpc based on the user requested value.
6006                  *
6007                  * The value for state->max_bpc may not be correctly updated
6008                  * depending on when the connector gets added to the state
6009                  * or if this was called outside of atomic check, so it
6010                  * can't be used directly.
6011                  */
6012                 bpc = min_t(u8, bpc, requested_bpc);
6013
6014                 /* Round down to the nearest even number. */
6015                 bpc = bpc - (bpc & 1);
6016         }
6017
6018         switch (bpc) {
6019         case 0:
6020                 /*
6021                  * Temporary workaround: DRM doesn't parse color depth for
6022                  * EDID revisions before 1.4.
6023                  * TODO: Fix EDID parsing
6024                  */
6025                 return COLOR_DEPTH_888;
6026         case 6:
6027                 return COLOR_DEPTH_666;
6028         case 8:
6029                 return COLOR_DEPTH_888;
6030         case 10:
6031                 return COLOR_DEPTH_101010;
6032         case 12:
6033                 return COLOR_DEPTH_121212;
6034         case 14:
6035                 return COLOR_DEPTH_141414;
6036         case 16:
6037                 return COLOR_DEPTH_161616;
6038         default:
6039                 return COLOR_DEPTH_UNDEFINED;
6040         }
6041 }
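
/*
 * Example (illustrative): a 12 bpc panel with the connector's max_bpc
 * property set to 11 yields min(12, 11) = 11, which the even-rounding
 * step above (bpc - (bpc & 1)) reduces to 10, i.e. COLOR_DEPTH_101010.
 */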
6042
6043 static enum dc_aspect_ratio
6044 get_aspect_ratio(const struct drm_display_mode *mode_in)
6045 {
6046         /* 1-1 mapping, since both enums follow the HDMI spec. */
6047         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
6048 }
6049
6050 static enum dc_color_space
6051 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
6052 {
6053         enum dc_color_space color_space = COLOR_SPACE_SRGB;
6054
6055         switch (dc_crtc_timing->pixel_encoding) {
6056         case PIXEL_ENCODING_YCBCR422:
6057         case PIXEL_ENCODING_YCBCR444:
6058         case PIXEL_ENCODING_YCBCR420:
6059         {
6060                 /*
6061                  * 27030 kHz is the separation point between HDTV and SDTV
6062                  * according to the HDMI spec; we use YCbCr709 and YCbCr601,
6063                  * respectively.
6064                  */
6065                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
6066                         if (dc_crtc_timing->flags.Y_ONLY)
6067                                 color_space =
6068                                         COLOR_SPACE_YCBCR709_LIMITED;
6069                         else
6070                                 color_space = COLOR_SPACE_YCBCR709;
6071                 } else {
6072                         if (dc_crtc_timing->flags.Y_ONLY)
6073                                 color_space =
6074                                         COLOR_SPACE_YCBCR601_LIMITED;
6075                         else
6076                                 color_space = COLOR_SPACE_YCBCR601;
6077                 }
6078
6079         }
6080         break;
6081         case PIXEL_ENCODING_RGB:
6082                 color_space = COLOR_SPACE_SRGB;
6083                 break;
6084
6085         default:
6086                 WARN_ON(1);
6087                 break;
6088         }
6089
6090         return color_space;
6091 }
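
/*
 * Example (illustrative): 480p YCbCr with a 27.000 MHz pixel clock
 * (pix_clk_100hz = 270000) is not above the 27030 kHz threshold and maps
 * to COLOR_SPACE_YCBCR601, while 720p at 74.25 MHz
 * (pix_clk_100hz = 742500) maps to COLOR_SPACE_YCBCR709.
 */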
6092
6093 static bool adjust_colour_depth_from_display_info(
6094         struct dc_crtc_timing *timing_out,
6095         const struct drm_display_info *info)
6096 {
6097         enum dc_color_depth depth = timing_out->display_color_depth;
6098         int normalized_clk;
6099         do {
6100                 normalized_clk = timing_out->pix_clk_100hz / 10;
6101                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
6102                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
6103                         normalized_clk /= 2;
6104                 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
6105                 switch (depth) {
6106                 case COLOR_DEPTH_888:
6107                         break;
6108                 case COLOR_DEPTH_101010:
6109                         normalized_clk = (normalized_clk * 30) / 24;
6110                         break;
6111                 case COLOR_DEPTH_121212:
6112                         normalized_clk = (normalized_clk * 36) / 24;
6113                         break;
6114                 case COLOR_DEPTH_161616:
6115                         normalized_clk = (normalized_clk * 48) / 24;
6116                         break;
6117                 default:
6118                         /* The above depths are the only ones valid for HDMI. */
6119                         return false;
6120                 }
6121                 if (normalized_clk <= info->max_tmds_clock) {
6122                         timing_out->display_color_depth = depth;
6123                         return true;
6124                 }
6125         } while (--depth > COLOR_DEPTH_666);
6126         return false;
6127 }
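
/*
 * Example (illustrative, assuming max_tmds_clock = 340000 kHz): 4K@60
 * YCbCr 4:2:0 has pix_clk_100hz = 5940000, so normalized_clk starts at
 * 594000 / 2 = 297000 kHz. Starting from 12 bpc, 297000 * 36 / 24 = 445500
 * exceeds 340000, as does 10 bpc (371250), so the loop settles on
 * COLOR_DEPTH_888 at 297000 kHz and returns true.
 */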
6128
6129 static void fill_stream_properties_from_drm_display_mode(
6130         struct dc_stream_state *stream,
6131         const struct drm_display_mode *mode_in,
6132         const struct drm_connector *connector,
6133         const struct drm_connector_state *connector_state,
6134         const struct dc_stream_state *old_stream,
6135         int requested_bpc)
6136 {
6137         struct dc_crtc_timing *timing_out = &stream->timing;
6138         const struct drm_display_info *info = &connector->display_info;
6139         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6140         struct hdmi_vendor_infoframe hv_frame;
6141         struct hdmi_avi_infoframe avi_frame;
6142
6143         memset(&hv_frame, 0, sizeof(hv_frame));
6144         memset(&avi_frame, 0, sizeof(avi_frame));
6145
6146         timing_out->h_border_left = 0;
6147         timing_out->h_border_right = 0;
6148         timing_out->v_border_top = 0;
6149         timing_out->v_border_bottom = 0;
6150         /* TODO: un-hardcode */
6151         if (drm_mode_is_420_only(info, mode_in)
6152                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6153                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6154         else if (drm_mode_is_420_also(info, mode_in)
6155                         && aconnector->force_yuv420_output)
6156                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6157         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
6158                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6159                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6160         else
6161                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6162
6163         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6164         timing_out->display_color_depth = convert_color_depth_from_display_info(
6165                 connector,
6166                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6167                 requested_bpc);
6168         timing_out->scan_type = SCANNING_TYPE_NODATA;
6169         timing_out->hdmi_vic = 0;
6170
6171         if (old_stream) {
6172                 timing_out->vic = old_stream->timing.vic;
6173                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6174                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6175         } else {
6176                 timing_out->vic = drm_match_cea_mode(mode_in);
6177                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6178                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6179                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6180                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6181         }
6182
6183         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6184                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6185                 timing_out->vic = avi_frame.video_code;
6186                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6187                 timing_out->hdmi_vic = hv_frame.vic;
6188         }
6189
6190         if (is_freesync_video_mode(mode_in, aconnector)) {
6191                 timing_out->h_addressable = mode_in->hdisplay;
6192                 timing_out->h_total = mode_in->htotal;
6193                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6194                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6195                 timing_out->v_total = mode_in->vtotal;
6196                 timing_out->v_addressable = mode_in->vdisplay;
6197                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6198                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6199                 timing_out->pix_clk_100hz = mode_in->clock * 10;
6200         } else {
6201                 timing_out->h_addressable = mode_in->crtc_hdisplay;
6202                 timing_out->h_total = mode_in->crtc_htotal;
6203                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6204                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6205                 timing_out->v_total = mode_in->crtc_vtotal;
6206                 timing_out->v_addressable = mode_in->crtc_vdisplay;
6207                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6208                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6209                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6210         }
6211
6212         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6213
6214         stream->output_color_space = get_output_color_space(timing_out);
6215
6216         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6217         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6218         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6219                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6220                     drm_mode_is_420_also(info, mode_in) &&
6221                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6222                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6223                         adjust_colour_depth_from_display_info(timing_out, info);
6224                 }
6225         }
6226 }
6227
6228 static void fill_audio_info(struct audio_info *audio_info,
6229                             const struct drm_connector *drm_connector,
6230                             const struct dc_sink *dc_sink)
6231 {
6232         int i = 0;
6233         int cea_revision = 0;
6234         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6235
6236         audio_info->manufacture_id = edid_caps->manufacturer_id;
6237         audio_info->product_id = edid_caps->product_id;
6238
6239         cea_revision = drm_connector->display_info.cea_rev;
6240
6241         strscpy(audio_info->display_name,
6242                 edid_caps->display_name,
6243                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6244
6245         if (cea_revision >= 3) {
6246                 audio_info->mode_count = edid_caps->audio_mode_count;
6247
6248                 for (i = 0; i < audio_info->mode_count; ++i) {
6249                         audio_info->modes[i].format_code =
6250                                         (enum audio_format_code)
6251                                         (edid_caps->audio_modes[i].format_code);
6252                         audio_info->modes[i].channel_count =
6253                                         edid_caps->audio_modes[i].channel_count;
6254                         audio_info->modes[i].sample_rates.all =
6255                                         edid_caps->audio_modes[i].sample_rate;
6256                         audio_info->modes[i].sample_size =
6257                                         edid_caps->audio_modes[i].sample_size;
6258                 }
6259         }
6260
6261         audio_info->flags.all = edid_caps->speaker_flags;
6262
6263         /* TODO: We only check the progressive mode; check the interlaced mode too */
6264         if (drm_connector->latency_present[0]) {
6265                 audio_info->video_latency = drm_connector->video_latency[0];
6266                 audio_info->audio_latency = drm_connector->audio_latency[0];
6267         }
6268
6269         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6270
6271 }
6272
6273 static void
6274 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6275                                       struct drm_display_mode *dst_mode)
6276 {
6277         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6278         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6279         dst_mode->crtc_clock = src_mode->crtc_clock;
6280         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6281         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6282         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6283         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6284         dst_mode->crtc_htotal = src_mode->crtc_htotal;
6285         dst_mode->crtc_hskew = src_mode->crtc_hskew;
6286         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6287         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6288         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6289         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6290         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6291 }
6292
6293 static void
6294 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6295                                         const struct drm_display_mode *native_mode,
6296                                         bool scale_enabled)
6297 {
6298         if (scale_enabled) {
6299                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6300         } else if (native_mode->clock == drm_mode->clock &&
6301                         native_mode->htotal == drm_mode->htotal &&
6302                         native_mode->vtotal == drm_mode->vtotal) {
6303                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6304         } else {
6305                 /* no scaling and no amdgpu-inserted mode; no need to patch */
6306         }
6307 }
6308
6309 static struct dc_sink *
6310 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6311 {
6312         struct dc_sink_init_data sink_init_data = { 0 };
6313         struct dc_sink *sink = NULL;
6314         sink_init_data.link = aconnector->dc_link;
6315         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6316
6317         sink = dc_sink_create(&sink_init_data);
6318         if (!sink) {
6319                 DRM_ERROR("Failed to create sink!\n");
6320                 return NULL;
6321         }
6322         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6323
6324         return sink;
6325 }
6326
6327 static void set_multisync_trigger_params(
6328                 struct dc_stream_state *stream)
6329 {
6330         struct dc_stream_state *master = NULL;
6331
6332         if (stream->triggered_crtc_reset.enabled) {
6333                 master = stream->triggered_crtc_reset.event_source;
6334                 stream->triggered_crtc_reset.event =
6335                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6336                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6337                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6338         }
6339 }
6340
6341 static void set_master_stream(struct dc_stream_state *stream_set[],
6342                               int stream_count)
6343 {
6344         int j, highest_rfr = 0, master_stream = 0;
6345
6346         for (j = 0; j < stream_count; j++) {
6347                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6348                         int refresh_rate = 0;
6349
6350                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6351                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6352                         if (refresh_rate > highest_rfr) {
6353                                 highest_rfr = refresh_rate;
6354                                 master_stream = j;
6355                         }
6356                 }
6357         }
6358         for (j = 0; j < stream_count; j++) {
6359                 if (stream_set[j])
6360                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6361         }
6362 }
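
/*
 * Example (illustrative): a 1080p60 stream with pix_clk_100hz = 1485000
 * and h_total x v_total = 2200 x 1125 yields
 * refresh_rate = 1485000 * 100 / (2200 * 1125) = 60, so it wins the master
 * election above against any stream refreshing below 60 Hz.
 */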
6363
6364 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6365 {
6366         int i = 0;
6367         struct dc_stream_state *stream;
6368
6369         if (context->stream_count < 2)
6370                 return;
6371         for (i = 0; i < context->stream_count; i++) {
6372                 if (!context->streams[i])
6373                         continue;
6374                 /*
6375                  * TODO: add a function to read AMD VSDB bits and set
6376                  * crtc_sync_master.multi_sync_enabled flag
6377                  * For now it's set to false
6378                  */
6379         }
6380
6381         set_master_stream(context->streams, context->stream_count);
6382
6383         for (i = 0; i < context->stream_count; i++) {
6384                 stream = context->streams[i];
6385
6386                 if (!stream)
6387                         continue;
6388
6389                 set_multisync_trigger_params(stream);
6390         }
6391 }
6392
6393 #if defined(CONFIG_DRM_AMD_DC_DCN)
6394 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6395                             struct dc_sink *sink, struct dc_stream_state *stream,
6396                             struct dsc_dec_dpcd_caps *dsc_caps)
6397 {
6398         stream->timing.flags.DSC = 0;
6399         dsc_caps->is_dsc_supported = false;
6400
6401         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6402                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6403                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6404                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6405                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6406                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6407                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6408                                 dsc_caps);
6409         }
6410 }
6411
6412 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6413                                     struct dc_sink *sink, struct dc_stream_state *stream,
6414                                     struct dsc_dec_dpcd_caps *dsc_caps,
6415                                     uint32_t max_dsc_target_bpp_limit_override)
6416 {
6417         const struct dc_link_settings *verified_link_cap = NULL;
6418         uint32_t link_bw_in_kbps;
6419         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6420         struct dc *dc = sink->ctx->dc;
6421         struct dc_dsc_bw_range bw_range = {0};
6422         struct dc_dsc_config dsc_cfg = {0};
6423
6424         verified_link_cap = dc_link_get_link_cap(stream->link);
6425         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6426         edp_min_bpp_x16 = 8 * 16;
6427         edp_max_bpp_x16 = 8 * 16;
6428
6429         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6430                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6431
6432         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6433                 edp_min_bpp_x16 = edp_max_bpp_x16;
6434
6435         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6436                                 dc->debug.dsc_min_slice_height_override,
6437                                 edp_min_bpp_x16, edp_max_bpp_x16,
6438                                 dsc_caps,
6439                                 &stream->timing,
6440                                 &bw_range)) {
6441
6442                 if (bw_range.max_kbps < link_bw_in_kbps) {
6443                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6444                                         dsc_caps,
6445                                         dc->debug.dsc_min_slice_height_override,
6446                                         max_dsc_target_bpp_limit_override,
6447                                         0,
6448                                         &stream->timing,
6449                                         &dsc_cfg)) {
6450                                 stream->timing.dsc_cfg = dsc_cfg;
6451                                 stream->timing.flags.DSC = 1;
6452                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6453                         }
6454                         return;
6455                 }
6456         }
6457
6458         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6459                                 dsc_caps,
6460                                 dc->debug.dsc_min_slice_height_override,
6461                                 max_dsc_target_bpp_limit_override,
6462                                 link_bw_in_kbps,
6463                                 &stream->timing,
6464                                 &dsc_cfg)) {
6465                 stream->timing.dsc_cfg = dsc_cfg;
6466                 stream->timing.flags.DSC = 1;
6467         }
6468 }
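
/*
 * Note (illustrative): the *_bpp_x16 values above are in units of 1/16 bit
 * per pixel, so edp_min_bpp_x16 = edp_max_bpp_x16 = 8 * 16 = 128 requests
 * exactly 8.0 bpp, clamped further when the panel reports a lower
 * edp_max_bits_per_pixel in its DSC DPCD caps.
 */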
6469
6470 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6471                                         struct dc_sink *sink, struct dc_stream_state *stream,
6472                                         struct dsc_dec_dpcd_caps *dsc_caps)
6473 {
6474         struct drm_connector *drm_connector = &aconnector->base;
6475         uint32_t link_bandwidth_kbps;
6476         uint32_t max_dsc_target_bpp_limit_override = 0;
6477         struct dc *dc = sink->ctx->dc;
6478         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6479         uint32_t dsc_max_supported_bw_in_kbps;
6480
6481         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6482                                                         dc_link_get_link_cap(aconnector->dc_link));
6483
6484         if (stream->link && stream->link->local_sink)
6485                 max_dsc_target_bpp_limit_override =
6486                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6487
6488         /* Set DSC policy according to dsc_clock_en */
6489         dc_dsc_policy_set_enable_dsc_when_not_needed(
6490                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6491
6492         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6493             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6494
6495                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6496
6497         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6498                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6499                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6500                                                 dsc_caps,
6501                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6502                                                 max_dsc_target_bpp_limit_override,
6503                                                 link_bandwidth_kbps,
6504                                                 &stream->timing,
6505                                                 &stream->timing.dsc_cfg)) {
6506                                 stream->timing.flags.DSC = 1;
6507                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6508                                                                  __func__, drm_connector->name);
6509                         }
6510                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6511                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6512                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6513                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6514
6515                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6516                                         max_supported_bw_in_kbps > 0 &&
6517                                         dsc_max_supported_bw_in_kbps > 0)
6518                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6519                                                 dsc_caps,
6520                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6521                                                 max_dsc_target_bpp_limit_override,
6522                                                 dsc_max_supported_bw_in_kbps,
6523                                                 &stream->timing,
6524                                                 &stream->timing.dsc_cfg)) {
6525                                         stream->timing.flags.DSC = 1;
6526                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6527                                                                          __func__, drm_connector->name);
6528                                 }
6529                 }
6530         }
6531
6532         /* Overwrite the stream flag if DSC is enabled through debugfs */
6533         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6534                 stream->timing.flags.DSC = 1;
6535
6536         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6537                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6538
6539         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6540                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6541
6542         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6543                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6544 }
6545 #endif /* CONFIG_DRM_AMD_DC_DCN */
6546
6547 /**
6548  * DOC: FreeSync Video
6549  *
6550  * When a userspace application wants to play a video, the content follows a
6551  * standard format definition that usually specifies the FPS for that format.
6552  * The list below illustrates some video formats and their expected FPS,
6553  * respectively:
6554  *
6555  * - TV/NTSC (23.976 FPS)
6556  * - Cinema (24 FPS)
6557  * - TV/PAL (25 FPS)
6558  * - TV/NTSC (29.97 FPS)
6559  * - TV/NTSC (30 FPS)
6560  * - Cinema HFR (48 FPS)
6561  * - TV/PAL (50 FPS)
6562  * - Commonly used (60 FPS)
6563  * - Multiples of 24 (48,72,96,120 FPS)
6564  *
6565  * The list of standard video formats is not huge and can be added to the
6566  * connector's mode list beforehand. With that, userspace can leverage
6567  * FreeSync to extend the front porch in order to attain the target refresh
6568  * rate. Such a switch will happen seamlessly, without screen blanking or
6569  * reprogramming of the output in any other way. If the userspace requests a
6570  * modesetting change compatible with FreeSync modes that only differ in the
6571  * refresh rate, DC will skip the full update and avoid a blink during the
6572  * transition. For example, the video player can change the modesetting from
6573  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6574  * causing any display blink. This same concept can be applied to a mode
6575  * setting change.
6576  */
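
/*
 * A minimal sketch of the front porch math described above (illustrative
 * only, not used by the driver; the helper name is hypothetical): with the
 * pixel clock and horizontal timing held fixed, a lower refresh rate is
 * reached by growing vtotal, i.e. by extending the vertical front porch.
 * E.g. a 594 MHz 4k60 mode (htotal 4400, vtotal 2250) needs vtotal ~2813
 * to present at 48 Hz.
 */
static inline int freesync_vtotal_for_refresh(const struct drm_display_mode *m,
                                              int target_vrefresh)
{
        /* vrefresh (Hz) = (clock [kHz] * 1000) / (htotal * vtotal) */
        return DIV_ROUND_CLOSEST(m->clock * 1000,
                                 m->htotal * target_vrefresh);
}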
6577 static struct drm_display_mode *
6578 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6579                           bool use_probed_modes)
6580 {
6581         struct drm_display_mode *m, *m_pref = NULL;
6582         u16 current_refresh, highest_refresh;
6583         struct list_head *list_head = use_probed_modes ?
6584                                                     &aconnector->base.probed_modes :
6585                                                     &aconnector->base.modes;
6586
6587         if (aconnector->freesync_vid_base.clock != 0)
6588                 return &aconnector->freesync_vid_base;
6589
6590         /* Find the preferred mode */
6591         list_for_each_entry(m, list_head, head) {
6592                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6593                         m_pref = m;
6594                         break;
6595                 }
6596         }
6597
6598         if (!m_pref) {
6599                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6600                 m_pref = list_first_entry_or_null(
6601                         &aconnector->base.modes, struct drm_display_mode, head);
6602                 if (!m_pref) {
6603                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6604                         return NULL;
6605                 }
6606         }
6607
6608         highest_refresh = drm_mode_vrefresh(m_pref);
6609
6610         /*
6611          * Find the mode with the highest refresh rate at the same resolution.
6612          * For some monitors, the preferred mode is not the one with the
6613          * highest supported refresh rate.
6614          */
6615         list_for_each_entry(m, list_head, head) {
6616                 current_refresh = drm_mode_vrefresh(m);
6617
6618                 if (m->hdisplay == m_pref->hdisplay &&
6619                     m->vdisplay == m_pref->vdisplay &&
6620                     highest_refresh < current_refresh) {
6621                         highest_refresh = current_refresh;
6622                         m_pref = m;
6623                 }
6624         }
6625
6626         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6627         return m_pref;
6628 }
6629
6630 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6631                                    struct amdgpu_dm_connector *aconnector)
6632 {
6633         struct drm_display_mode *high_mode;
6634         int timing_diff;
6635
6636         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6637         if (!high_mode || !mode)
6638                 return false;
6639
6640         timing_diff = high_mode->vtotal - mode->vtotal;
6641
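        /*
         * Everything but the vertical front porch must match: if vsync_start
         * and vsync_end shift by exactly the vtotal delta, only the front
         * porch length differs between the two modes.
         */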
6642         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6643             high_mode->hdisplay != mode->hdisplay ||
6644             high_mode->vdisplay != mode->vdisplay ||
6645             high_mode->hsync_start != mode->hsync_start ||
6646             high_mode->hsync_end != mode->hsync_end ||
6647             high_mode->htotal != mode->htotal ||
6648             high_mode->hskew != mode->hskew ||
6649             high_mode->vscan != mode->vscan ||
6650             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6651             high_mode->vsync_end - mode->vsync_end != timing_diff)
6652                 return false;
6653
6654         return true;
6655 }
6656
6657 static struct dc_stream_state *
6658 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6659                        const struct drm_display_mode *drm_mode,
6660                        const struct dm_connector_state *dm_state,
6661                        const struct dc_stream_state *old_stream,
6662                        int requested_bpc)
6663 {
6664         struct drm_display_mode *preferred_mode = NULL;
6665         struct drm_connector *drm_connector;
6666         const struct drm_connector_state *con_state =
6667                 dm_state ? &dm_state->base : NULL;
6668         struct dc_stream_state *stream = NULL;
6669         struct drm_display_mode mode = *drm_mode;
6670         struct drm_display_mode saved_mode;
6671         struct drm_display_mode *freesync_mode = NULL;
6672         bool native_mode_found = false;
6673         bool recalculate_timing = false;
6674         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6675         int mode_refresh;
6676         int preferred_refresh = 0;
6677 #if defined(CONFIG_DRM_AMD_DC_DCN)
6678         struct dsc_dec_dpcd_caps dsc_caps;
6679 #endif
6680         struct dc_sink *sink = NULL;
6681
6682         memset(&saved_mode, 0, sizeof(saved_mode));
6683
6684         if (aconnector == NULL) {
6685                 DRM_ERROR("aconnector is NULL!\n");
6686                 return stream;
6687         }
6688
6689         drm_connector = &aconnector->base;
6690
6691         if (!aconnector->dc_sink) {
6692                 sink = create_fake_sink(aconnector);
6693                 if (!sink)
6694                         return stream;
6695         } else {
6696                 sink = aconnector->dc_sink;
6697                 dc_sink_retain(sink);
6698         }
6699
6700         stream = dc_create_stream_for_sink(sink);
6701
6702         if (stream == NULL) {
6703                 DRM_ERROR("Failed to create stream for sink!\n");
6704                 goto finish;
6705         }
6706
6707         stream->dm_stream_context = aconnector;
6708
6709         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6710                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6711
6712         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6713                 /* Search for preferred mode */
6714                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6715                         native_mode_found = true;
6716                         break;
6717                 }
6718         }
6719         if (!native_mode_found)
6720                 preferred_mode = list_first_entry_or_null(
6721                                 &aconnector->base.modes,
6722                                 struct drm_display_mode,
6723                                 head);
6724
6725         mode_refresh = drm_mode_vrefresh(&mode);
6726
6727         if (preferred_mode == NULL) {
6728                 /*
6729                  * This may not be an error, the use case is when we have no
6730                  * usermode calls to reset and set mode upon hotplug. In this
6731                  * case, we call set mode ourselves to restore the previous mode
6732                  * and the mode list may not be filled in time.
6733                  */
6734                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6735         } else {
6736                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6737                 if (recalculate_timing) {
6738                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6739                         drm_mode_copy(&saved_mode, &mode);
6740                         drm_mode_copy(&mode, freesync_mode);
6741                 } else {
6742                         decide_crtc_timing_for_drm_display_mode(
6743                                 &mode, preferred_mode, scale);
6744
6745                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6746                 }
6747         }
6748
6749         if (recalculate_timing)
6750                 drm_mode_set_crtcinfo(&saved_mode, 0);
6751         else if (!dm_state)
6752                 drm_mode_set_crtcinfo(&mode, 0);
6753
6754         /*
6755          * If scaling is enabled and the refresh rate didn't change,
6756          * we copy the vic and polarities of the old timings.
6757          */
6758         if (!scale || mode_refresh != preferred_refresh)
6759                 fill_stream_properties_from_drm_display_mode(
6760                         stream, &mode, &aconnector->base, con_state, NULL,
6761                         requested_bpc);
6762         else
6763                 fill_stream_properties_from_drm_display_mode(
6764                         stream, &mode, &aconnector->base, con_state, old_stream,
6765                         requested_bpc);
6766
6767 #if defined(CONFIG_DRM_AMD_DC_DCN)
6768         /* SST DSC determination policy */
6769         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6770         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6771                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6772 #endif
6773
6774         update_stream_scaling_settings(&mode, dm_state, stream);
6775
6776         fill_audio_info(
6777                 &stream->audio_info,
6778                 drm_connector,
6779                 sink);
6780
6781         update_stream_signal(stream, sink);
6782
6783         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6784                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6785
6786         if (stream->link->psr_settings.psr_feature_enabled) {
6787                 /*
6788                  * Decide whether the stream supports VSC SDP colorimetry
6789                  * before building the VSC info packet.
6790                  */
6791                 stream->use_vsc_sdp_for_colorimetry = false;
6792                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6793                         stream->use_vsc_sdp_for_colorimetry =
6794                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6795                 } else {
6796                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6797                                 stream->use_vsc_sdp_for_colorimetry = true;
6798                 }
6799                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6800                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6801
6802         }
6803 finish:
6804         dc_sink_release(sink);
6805
6806         return stream;
6807 }
6808
6809 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6810 {
6811         drm_crtc_cleanup(crtc);
6812         kfree(crtc);
6813 }
6814
6815 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6816                                   struct drm_crtc_state *state)
6817 {
6818         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6819
6820         /* TODO Destroy dc_stream objects once the stream object is flattened */
6821         if (cur->stream)
6822                 dc_stream_release(cur->stream);
6823
6824         __drm_atomic_helper_crtc_destroy_state(state);
6825
6828         kfree(state);
6829 }
6830
6831 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6832 {
6833         struct dm_crtc_state *state;
6834
6835         if (crtc->state)
6836                 dm_crtc_destroy_state(crtc, crtc->state);
6837
6838         state = kzalloc(sizeof(*state), GFP_KERNEL);
6839         if (WARN_ON(!state))
6840                 return;
6841
6842         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6843 }
6844
6845 static struct drm_crtc_state *
6846 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6847 {
6848         struct dm_crtc_state *state, *cur;
6849
6850         if (WARN_ON(!crtc->state))
6851                 return NULL;
6852
6853         cur = to_dm_crtc_state(crtc->state);
6854
6855         state = kzalloc(sizeof(*state), GFP_KERNEL);
6856         if (!state)
6857                 return NULL;
6858
6859         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6860
6861         if (cur->stream) {
6862                 state->stream = cur->stream;
6863                 dc_stream_retain(state->stream);
6864         }
6865
6866         state->active_planes = cur->active_planes;
6867         state->vrr_infopacket = cur->vrr_infopacket;
6868         state->abm_level = cur->abm_level;
6869         state->vrr_supported = cur->vrr_supported;
6870         state->freesync_config = cur->freesync_config;
6871         state->cm_has_degamma = cur->cm_has_degamma;
6872         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6873         state->mpo_requested = cur->mpo_requested;
6874         /* TODO Duplicate dc_stream once the stream object is flattened */
6875
6876         return &state->base;
6877 }
6878
6879 #ifdef CONFIG_DEBUG_FS
6880 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6881 {
6882         crtc_debugfs_init(crtc);
6883
6884         return 0;
6885 }
6886 #endif
6887
6888 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6889 {
6890         enum dc_irq_source irq_source;
6891         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6892         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6893         int rc;
6894
6895         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6896
6897         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6898
6899         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6900                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6901         return rc;
6902 }
6903
6904 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6905 {
6906         enum dc_irq_source irq_source;
6907         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6908         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6909         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6910         struct amdgpu_display_manager *dm = &adev->dm;
6911         struct vblank_control_work *work;
6912         int rc = 0;
6913
6914         if (enable) {
6915                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6916                 if (amdgpu_dm_vrr_active(acrtc_state))
6917                         rc = dm_set_vupdate_irq(crtc, true);
6918         } else {
6919                 /* vblank irq off -> vupdate irq off */
6920                 rc = dm_set_vupdate_irq(crtc, false);
6921         }
6922
6923         if (rc)
6924                 return rc;
6925
6926         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6927
6928         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6929                 return -EBUSY;
6930
6931         if (amdgpu_in_reset(adev))
6932                 return 0;
6933
6934         if (dm->vblank_control_workqueue) {
6935                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6936                 if (!work)
6937                         return -ENOMEM;
6938
6939                 INIT_WORK(&work->work, vblank_control_worker);
6940                 work->dm = dm;
6941                 work->acrtc = acrtc;
6942                 work->enable = enable;
6943
6944                 if (acrtc_state->stream) {
6945                         dc_stream_retain(acrtc_state->stream);
6946                         work->stream = acrtc_state->stream;
6947                 }
6948
6949                 queue_work(dm->vblank_control_workqueue, &work->work);
6950         }
6951
6952         return 0;
6953 }
6954
6955 static int dm_enable_vblank(struct drm_crtc *crtc)
6956 {
6957         return dm_set_vblank(crtc, true);
6958 }
6959
6960 static void dm_disable_vblank(struct drm_crtc *crtc)
6961 {
6962         dm_set_vblank(crtc, false);
6963 }
6964
6965 /* Only the options currently available for the driver are implemented */
6966 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6967         .reset = dm_crtc_reset_state,
6968         .destroy = amdgpu_dm_crtc_destroy,
6969         .set_config = drm_atomic_helper_set_config,
6970         .page_flip = drm_atomic_helper_page_flip,
6971         .atomic_duplicate_state = dm_crtc_duplicate_state,
6972         .atomic_destroy_state = dm_crtc_destroy_state,
6973         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6974         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6975         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6976         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6977         .enable_vblank = dm_enable_vblank,
6978         .disable_vblank = dm_disable_vblank,
6979         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6980 #if defined(CONFIG_DEBUG_FS)
6981         .late_register = amdgpu_dm_crtc_late_register,
6982 #endif
6983 };
6984
6985 static enum drm_connector_status
6986 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6987 {
6988         bool connected;
6989         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6990
6991         /*
6992          * Notes:
6993          * 1. This interface is NOT called in the context of HPD irq.
6994          * 2. This interface *is called* in the context of user-mode ioctl,
6995          * which makes it a bad place for *any* MST-related activity.
6996          */
6997
6998         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6999             !aconnector->fake_enable)
7000                 connected = (aconnector->dc_sink != NULL);
7001         else
7002                 connected = (aconnector->base.force == DRM_FORCE_ON);
7003
7004         update_subconnector_property(aconnector);
7005
7006         return (connected ? connector_status_connected :
7007                         connector_status_disconnected);
7008 }
7009
7010 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
7011                                             struct drm_connector_state *connector_state,
7012                                             struct drm_property *property,
7013                                             uint64_t val)
7014 {
7015         struct drm_device *dev = connector->dev;
7016         struct amdgpu_device *adev = drm_to_adev(dev);
7017         struct dm_connector_state *dm_old_state =
7018                 to_dm_connector_state(connector->state);
7019         struct dm_connector_state *dm_new_state =
7020                 to_dm_connector_state(connector_state);
7021
7022         int ret = -EINVAL;
7023
7024         if (property == dev->mode_config.scaling_mode_property) {
7025                 enum amdgpu_rmx_type rmx_type;
7026
7027                 switch (val) {
7028                 case DRM_MODE_SCALE_CENTER:
7029                         rmx_type = RMX_CENTER;
7030                         break;
7031                 case DRM_MODE_SCALE_ASPECT:
7032                         rmx_type = RMX_ASPECT;
7033                         break;
7034                 case DRM_MODE_SCALE_FULLSCREEN:
7035                         rmx_type = RMX_FULL;
7036                         break;
7037                 case DRM_MODE_SCALE_NONE:
7038                 default:
7039                         rmx_type = RMX_OFF;
7040                         break;
7041                 }
7042
7043                 if (dm_old_state->scaling == rmx_type)
7044                         return 0;
7045
7046                 dm_new_state->scaling = rmx_type;
7047                 ret = 0;
7048         } else if (property == adev->mode_info.underscan_hborder_property) {
7049                 dm_new_state->underscan_hborder = val;
7050                 ret = 0;
7051         } else if (property == adev->mode_info.underscan_vborder_property) {
7052                 dm_new_state->underscan_vborder = val;
7053                 ret = 0;
7054         } else if (property == adev->mode_info.underscan_property) {
7055                 dm_new_state->underscan_enable = val;
7056                 ret = 0;
7057         } else if (property == adev->mode_info.abm_level_property) {
7058                 dm_new_state->abm_level = val;
7059                 ret = 0;
7060         }
7061
7062         return ret;
7063 }
7064
7065 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
7066                                             const struct drm_connector_state *state,
7067                                             struct drm_property *property,
7068                                             uint64_t *val)
7069 {
7070         struct drm_device *dev = connector->dev;
7071         struct amdgpu_device *adev = drm_to_adev(dev);
7072         struct dm_connector_state *dm_state =
7073                 to_dm_connector_state(state);
7074         int ret = -EINVAL;
7075
7076         if (property == dev->mode_config.scaling_mode_property) {
7077                 switch (dm_state->scaling) {
7078                 case RMX_CENTER:
7079                         *val = DRM_MODE_SCALE_CENTER;
7080                         break;
7081                 case RMX_ASPECT:
7082                         *val = DRM_MODE_SCALE_ASPECT;
7083                         break;
7084                 case RMX_FULL:
7085                         *val = DRM_MODE_SCALE_FULLSCREEN;
7086                         break;
7087                 case RMX_OFF:
7088                 default:
7089                         *val = DRM_MODE_SCALE_NONE;
7090                         break;
7091                 }
7092                 ret = 0;
7093         } else if (property == adev->mode_info.underscan_hborder_property) {
7094                 *val = dm_state->underscan_hborder;
7095                 ret = 0;
7096         } else if (property == adev->mode_info.underscan_vborder_property) {
7097                 *val = dm_state->underscan_vborder;
7098                 ret = 0;
7099         } else if (property == adev->mode_info.underscan_property) {
7100                 *val = dm_state->underscan_enable;
7101                 ret = 0;
7102         } else if (property == adev->mode_info.abm_level_property) {
7103                 *val = dm_state->abm_level;
7104                 ret = 0;
7105         }
7106
7107         return ret;
7108 }
7109
7110 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7111 {
7112         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7113
7114         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7115 }
7116
7117 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
7118 {
7119         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7120         const struct dc_link *link = aconnector->dc_link;
7121         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7122         struct amdgpu_display_manager *dm = &adev->dm;
7123         int i;
7124
7125         /*
7126          * Call only if mst_mgr was initialized before, since it's not done
7127          * for all connector types.
7128          */
7129         if (aconnector->mst_mgr.dev)
7130                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7131
7132         for (i = 0; i < dm->num_of_edps; i++) {
7133                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7134                         backlight_device_unregister(dm->backlight_dev[i]);
7135                         dm->backlight_dev[i] = NULL;
7136                 }
7137         }
7138
7139         if (aconnector->dc_em_sink)
7140                 dc_sink_release(aconnector->dc_em_sink);
7141         aconnector->dc_em_sink = NULL;
7142         if (aconnector->dc_sink)
7143                 dc_sink_release(aconnector->dc_sink);
7144         aconnector->dc_sink = NULL;
7145
7146         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
7147         drm_connector_unregister(connector);
7148         drm_connector_cleanup(connector);
7149         if (aconnector->i2c) {
7150                 i2c_del_adapter(&aconnector->i2c->base);
7151                 kfree(aconnector->i2c);
7152         }
7153         kfree(aconnector->dm_dp_aux.aux.name);
7154
7155         kfree(connector);
7156 }
7157
7158 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7159 {
7160         struct dm_connector_state *state =
7161                 to_dm_connector_state(connector->state);
7162
7163         if (connector->state)
7164                 __drm_atomic_helper_connector_destroy_state(connector->state);
7165
7166         kfree(state);
7167
7168         state = kzalloc(sizeof(*state), GFP_KERNEL);
7169
7170         if (state) {
7171                 state->scaling = RMX_OFF;
7172                 state->underscan_enable = false;
7173                 state->underscan_hborder = 0;
7174                 state->underscan_vborder = 0;
7175                 state->base.max_requested_bpc = 8;
7176                 state->vcpi_slots = 0;
7177                 state->pbn = 0;
7178                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7179                         state->abm_level = amdgpu_dm_abm_level;
7180
7181                 __drm_atomic_helper_connector_reset(connector, &state->base);
7182         }
7183 }
7184
7185 struct drm_connector_state *
7186 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7187 {
7188         struct dm_connector_state *state =
7189                 to_dm_connector_state(connector->state);
7190
7191         struct dm_connector_state *new_state =
7192                         kmemdup(state, sizeof(*state), GFP_KERNEL);
7193
7194         if (!new_state)
7195                 return NULL;
7196
7197         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7198
7199         new_state->freesync_capable = state->freesync_capable;
7200         new_state->abm_level = state->abm_level;
7201         new_state->scaling = state->scaling;
7202         new_state->underscan_enable = state->underscan_enable;
7203         new_state->underscan_hborder = state->underscan_hborder;
7204         new_state->underscan_vborder = state->underscan_vborder;
7205         new_state->vcpi_slots = state->vcpi_slots;
7206         new_state->pbn = state->pbn;
7207         return &new_state->base;
7208 }
7209
7210 static int
7211 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7212 {
7213         struct amdgpu_dm_connector *amdgpu_dm_connector =
7214                 to_amdgpu_dm_connector(connector);
7215         int r;
7216
7217         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7218             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7219                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7220                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7221                 if (r)
7222                         return r;
7223         }
7224
7225 #if defined(CONFIG_DEBUG_FS)
7226         connector_debugfs_init(amdgpu_dm_connector);
7227 #endif
7228
7229         return 0;
7230 }
7231
7232 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7233         .reset = amdgpu_dm_connector_funcs_reset,
7234         .detect = amdgpu_dm_connector_detect,
7235         .fill_modes = drm_helper_probe_single_connector_modes,
7236         .destroy = amdgpu_dm_connector_destroy,
7237         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7238         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7239         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7240         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7241         .late_register = amdgpu_dm_connector_late_register,
7242         .early_unregister = amdgpu_dm_connector_unregister
7243 };
7244
7245 static int get_modes(struct drm_connector *connector)
7246 {
7247         return amdgpu_dm_connector_get_modes(connector);
7248 }
7249
7250 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7251 {
7252         struct dc_sink_init_data init_params = {
7253                         .link = aconnector->dc_link,
7254                         .sink_signal = SIGNAL_TYPE_VIRTUAL
7255         };
7256         struct edid *edid;
7257
7258         if (!aconnector->base.edid_blob_ptr) {
7259                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7260                                 aconnector->base.name);
7261
7262                 aconnector->base.force = DRM_FORCE_OFF;
7263                 aconnector->base.override_edid = false;
7264                 return;
7265         }
7266
7267         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7268
7269         aconnector->edid = edid;
7270
7271         aconnector->dc_em_sink = dc_link_add_remote_sink(
7272                 aconnector->dc_link,
7273                 (uint8_t *)edid,
7274                 (edid->extensions + 1) * EDID_LENGTH,
7275                 &init_params);
7276
7277         if (aconnector->base.force == DRM_FORCE_ON) {
7278                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7279                 aconnector->dc_link->local_sink :
7280                 aconnector->dc_em_sink;
7281                 dc_sink_retain(aconnector->dc_sink);
7282         }
7283 }
7284
7285 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7286 {
7287         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7288
7289         /*
7290          * In case of headless boot with force on for a DP managed connector,
7291          * those settings have to be != 0 to get an initial modeset.
7292          */
7293         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7294                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7295                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7296         }
7297
7298
7299         aconnector->base.override_edid = true;
7300         create_eml_sink(aconnector);
7301 }
7302
7303 struct dc_stream_state *
7304 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7305                                 const struct drm_display_mode *drm_mode,
7306                                 const struct dm_connector_state *dm_state,
7307                                 const struct dc_stream_state *old_stream)
7308 {
7309         struct drm_connector *connector = &aconnector->base;
7310         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7311         struct dc_stream_state *stream;
7312         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7313         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7314         enum dc_status dc_result = DC_OK;
7315
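        /*
         * Retry validation while lowering bpc by 2 each pass,
         * e.g. 10 -> 8 -> 6, until the stream validates or bpc runs out.
         */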
7316         do {
7317                 stream = create_stream_for_sink(aconnector, drm_mode,
7318                                                 dm_state, old_stream,
7319                                                 requested_bpc);
7320                 if (stream == NULL) {
7321                         DRM_ERROR("Failed to create stream for sink!\n");
7322                         break;
7323                 }
7324
7325                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7326                 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
7327                         dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7328
7329                 if (dc_result != DC_OK) {
7330                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7331                                       drm_mode->hdisplay,
7332                                       drm_mode->vdisplay,
7333                                       drm_mode->clock,
7334                                       dc_result,
7335                                       dc_status_to_str(dc_result));
7336
7337                         dc_stream_release(stream);
7338                         stream = NULL;
7339                         requested_bpc -= 2; /* lower bpc to retry validation */
7340                 }
7341
7342         } while (stream == NULL && requested_bpc >= 6);
7343
7344         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7345                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7346
7347                 aconnector->force_yuv420_output = true;
7348                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7349                                                 dm_state, old_stream);
7350                 aconnector->force_yuv420_output = false;
7351         }
7352
7353         return stream;
7354 }
7355
7356 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7357                                    struct drm_display_mode *mode)
7358 {
7359         int result = MODE_ERROR;
7360         struct dc_sink *dc_sink;
7361         /* TODO: Unhardcode stream count */
7362         struct dc_stream_state *stream;
7363         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7364
7365         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7366                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7367                 return result;
7368
7369         /*
7370          * Only run this the first time mode_valid is called to initialize
7371          * EDID mgmt
7372          */
7373         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7374                 !aconnector->dc_em_sink)
7375                 handle_edid_mgmt(aconnector);
7376
7377         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7378
7379         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7380                                 aconnector->base.force != DRM_FORCE_ON) {
7381                 DRM_ERROR("dc_sink is NULL!\n");
7382                 goto fail;
7383         }
7384
7385         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7386         if (stream) {
7387                 dc_stream_release(stream);
7388                 result = MODE_OK;
7389         }
7390
7391 fail:
7392         /* TODO: error handling */
7393         return result;
7394 }
7395
7396 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7397                                 struct dc_info_packet *out)
7398 {
7399         struct hdmi_drm_infoframe frame;
7400         unsigned char buf[30]; /* 26 + 4 */
7401         ssize_t len;
7402         int ret, i;
7403
7404         memset(out, 0, sizeof(*out));
7405
7406         if (!state->hdr_output_metadata)
7407                 return 0;
7408
7409         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7410         if (ret)
7411                 return ret;
7412
7413         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7414         if (len < 0)
7415                 return (int)len;
7416
7417         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7418         if (len != 30)
7419                 return -EINVAL;
7420
7421         /* Prepare the infopacket for DC. */
7422         switch (state->connector->connector_type) {
7423         case DRM_MODE_CONNECTOR_HDMIA:
7424                 out->hb0 = 0x87; /* type */
7425                 out->hb1 = 0x01; /* version */
7426                 out->hb2 = 0x1A; /* length */
7427                 out->sb[0] = buf[3]; /* checksum */
7428                 i = 1;
7429                 break;
7430
7431         case DRM_MODE_CONNECTOR_DisplayPort:
7432         case DRM_MODE_CONNECTOR_eDP:
7433                 out->hb0 = 0x00; /* sdp id, zero */
7434                 out->hb1 = 0x87; /* type */
7435                 out->hb2 = 0x1D; /* payload len - 1 */
7436                 out->hb3 = (0x13 << 2); /* sdp version */
7437                 out->sb[0] = 0x01; /* version */
7438                 out->sb[1] = 0x1A; /* length */
7439                 i = 2;
7440                 break;
7441
7442         default:
7443                 return -EINVAL;
7444         }
7445
7446         memcpy(&out->sb[i], &buf[4], 26);
7447         out->valid = true;
7448
7449         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7450                        sizeof(out->sb), false);
7451
7452         return 0;
7453 }
7454
7455 static int
7456 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7457                                  struct drm_atomic_state *state)
7458 {
7459         struct drm_connector_state *new_con_state =
7460                 drm_atomic_get_new_connector_state(state, conn);
7461         struct drm_connector_state *old_con_state =
7462                 drm_atomic_get_old_connector_state(state, conn);
7463         struct drm_crtc *crtc = new_con_state->crtc;
7464         struct drm_crtc_state *new_crtc_state;
7465         int ret;
7466
7467         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7468
7469         if (!crtc)
7470                 return 0;
7471
7472         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7473                 struct dc_info_packet hdr_infopacket;
7474
7475                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7476                 if (ret)
7477                         return ret;
7478
7479                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7480                 if (IS_ERR(new_crtc_state))
7481                         return PTR_ERR(new_crtc_state);
7482
7483                 /*
7484                  * DC considers the stream backends changed if the
7485                  * static metadata changes. Forcing the modeset also
7486                  * gives a simple way for userspace to switch from
7487                  * 8bpc to 10bpc when setting the metadata to enter
7488                  * or exit HDR.
7489                  *
7490                  * Changing the static metadata after it's been
7491                  * set is permissible, however. So only force a
7492                  * modeset if we're entering or exiting HDR.
7493                  */
7494                 new_crtc_state->mode_changed =
7495                         !old_con_state->hdr_output_metadata ||
7496                         !new_con_state->hdr_output_metadata;
7497         }
7498
7499         return 0;
7500 }
7501
7502 static const struct drm_connector_helper_funcs
7503 amdgpu_dm_connector_helper_funcs = {
7504         /*
7505          * When hotplugging a second, bigger display in FB console mode, the
7506          * bigger resolution modes will be filtered out by drm_mode_validate_size()
7507          * and be missing after the user starts lightdm. So we need to renew the
7508          * modes list in the get_modes callback, not just return the modes count.
7509          */
7510         .get_modes = get_modes,
7511         .mode_valid = amdgpu_dm_connector_mode_valid,
7512         .atomic_check = amdgpu_dm_connector_atomic_check,
7513 };
7514
7515 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7516 {
7517 }
7518
7519 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7520 {
7521         struct drm_atomic_state *state = new_crtc_state->state;
7522         struct drm_plane *plane;
7523         int num_active = 0;
7524
7525         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7526                 struct drm_plane_state *new_plane_state;
7527
7528                 /* Cursor planes are "fake". */
7529                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7530                         continue;
7531
7532                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7533
7534                 if (!new_plane_state) {
7535                         /*
7536                          * The plane is enabled on the CRTC and hasn't changed
7537                          * state. This means that it previously passed
7538                          * validation and is therefore enabled.
7539                          */
7540                         num_active += 1;
7541                         continue;
7542                 }
7543
7544                 /* We need a framebuffer to be considered enabled. */
7545                 num_active += (new_plane_state->fb != NULL);
7546         }
7547
7548         return num_active;
7549 }
7550
7551 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7552                                          struct drm_crtc_state *new_crtc_state)
7553 {
7554         struct dm_crtc_state *dm_new_crtc_state =
7555                 to_dm_crtc_state(new_crtc_state);
7556
7557         dm_new_crtc_state->active_planes = 0;
7558
7559         if (!dm_new_crtc_state->stream)
7560                 return;
7561
7562         dm_new_crtc_state->active_planes =
7563                 count_crtc_active_planes(new_crtc_state);
7564 }
7565
7566 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7567                                        struct drm_atomic_state *state)
7568 {
7569         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7570                                                                           crtc);
7571         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7572         struct dc *dc = adev->dm.dc;
7573         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7574         int ret = -EINVAL;
7575
7576         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7577
7578         dm_update_crtc_active_planes(crtc, crtc_state);
7579
7580         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7581                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7582                 return ret;
7583         }
7584
7585         /*
7586          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7587          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7588          * planes are disabled, which is not supported by the hardware. And there is legacy
7589          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7590          */
7591         if (crtc_state->enable &&
7592             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7593                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7594                 return -EINVAL;
7595         }
7596
7597         /* In some use cases, like reset, no stream is attached */
7598         if (!dm_crtc_state->stream)
7599                 return 0;
7600
7601         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7602                 return 0;
7603
7604         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7605         return ret;
7606 }
7607
7608 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7609                                       const struct drm_display_mode *mode,
7610                                       struct drm_display_mode *adjusted_mode)
7611 {
7612         return true;
7613 }
7614
7615 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7616         .disable = dm_crtc_helper_disable,
7617         .atomic_check = dm_crtc_helper_atomic_check,
7618         .mode_fixup = dm_crtc_helper_mode_fixup,
7619         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7620 };
7621
7622 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7623 {
7624
7625 }
7626
7627 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7628 {
7629         switch (display_color_depth) {
7630         case COLOR_DEPTH_666:
7631                 return 6;
7632         case COLOR_DEPTH_888:
7633                 return 8;
7634         case COLOR_DEPTH_101010:
7635                 return 10;
7636         case COLOR_DEPTH_121212:
7637                 return 12;
7638         case COLOR_DEPTH_141414:
7639                 return 14;
7640         case COLOR_DEPTH_161616:
7641                 return 16;
7642         default:
7643                 break;
7644         }
7645         return 0;
7646 }
7647
7648 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7649                                           struct drm_crtc_state *crtc_state,
7650                                           struct drm_connector_state *conn_state)
7651 {
7652         struct drm_atomic_state *state = crtc_state->state;
7653         struct drm_connector *connector = conn_state->connector;
7654         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7655         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7656         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7657         struct drm_dp_mst_topology_mgr *mst_mgr;
7658         struct drm_dp_mst_port *mst_port;
7659         enum dc_color_depth color_depth;
7660         int clock, bpp = 0;
7661         bool is_y420 = false;
7662
7663         if (!aconnector->port || !aconnector->dc_sink)
7664                 return 0;
7665
7666         mst_port = aconnector->port;
7667         mst_mgr = &aconnector->mst_port->mst_mgr;
7668
7669         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7670                 return 0;
7671
7672         if (!state->duplicated) {
7673                 int max_bpc = conn_state->max_requested_bpc;
7674                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7675                                 aconnector->force_yuv420_output;
7676                 color_depth = convert_color_depth_from_display_info(connector,
7677                                                                     is_y420,
7678                                                                     max_bpc);
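                /* 3 components, so e.g. 10 bpc yields a 30 bpp stream */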
7679                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7680                 clock = adjusted_mode->clock;
7681                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7682         }
7683         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7684                                                                            mst_mgr,
7685                                                                            mst_port,
7686                                                                            dm_new_connector_state->pbn,
7687                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7688         if (dm_new_connector_state->vcpi_slots < 0) {
7689                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7690                 return dm_new_connector_state->vcpi_slots;
7691         }
7692         return 0;
7693 }
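
/*
 * A rough sketch of the PBN arithmetic drm_dp_calc_pbn_mode() performs for
 * the non-DSC case above (illustrative only; the helper name is
 * hypothetical): one PBN unit is 54/64 MBps of link bandwidth, and a 0.6%
 * margin is added on top of the raw clock * bpp / 8 payload rate.
 */
static inline int example_calc_pbn(int clock_khz, int bpp)
{
        /* peak payload scaled by 64 (PBN unit) and 1006/1000 (margin) */
        u64 peak = (u64)clock_khz * bpp * 64 * 1006;

        return DIV_ROUND_UP_ULL(peak, 8ULL * 54 * 1000 * 1000);
}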
7694
7695 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7696         .disable = dm_encoder_helper_disable,
7697         .atomic_check = dm_encoder_helper_atomic_check
7698 };
7699
7700 #if defined(CONFIG_DRM_AMD_DC_DCN)
7701 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7702                                             struct dc_state *dc_state,
7703                                             struct dsc_mst_fairness_vars *vars)
7704 {
7705         struct dc_stream_state *stream = NULL;
7706         struct drm_connector *connector;
7707         struct drm_connector_state *new_con_state;
7708         struct amdgpu_dm_connector *aconnector;
7709         struct dm_connector_state *dm_conn_state;
7710         int i, j;
7711         int vcpi, pbn_div, pbn, slot_num = 0;
7712
7713         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7714
7715                 aconnector = to_amdgpu_dm_connector(connector);
7716
7717                 if (!aconnector->port)
7718                         continue;
7719
7720                 if (!new_con_state || !new_con_state->crtc)
7721                         continue;
7722
7723                 dm_conn_state = to_dm_connector_state(new_con_state);
7724
7725                 for (j = 0; j < dc_state->stream_count; j++) {
7726                         stream = dc_state->streams[j];
7727                         if (!stream)
7728                                 continue;
7729
7730                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7731                                 break;
7732
7733                         stream = NULL;
7734                 }
7735
7736                 if (!stream)
7737                         continue;
7738
7739                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7740                 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7741                 for (j = 0; j < dc_state->stream_count; j++) {
7742                         if (vars[j].aconnector == aconnector) {
7743                                 pbn = vars[j].pbn;
7744                                 break;
7745                         }
7746                 }
7747
7748                 if (j == dc_state->stream_count)
7749                         continue;
7750
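                /*
                 * pbn_div is the PBN capacity of one MST time slot on this
                 * link, so the slot count is the rounded-up quotient.
                 */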
7751                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7752
7753                 if (stream->timing.flags.DSC != 1) {
7754                         dm_conn_state->pbn = pbn;
7755                         dm_conn_state->vcpi_slots = slot_num;
7756
7757                         drm_dp_mst_atomic_enable_dsc(state,
7758                                                      aconnector->port,
7759                                                      dm_conn_state->pbn,
7760                                                      0,
7761                                                      false);
7762                         continue;
7763                 }
7764
7765                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7766                                                     aconnector->port,
7767                                                     pbn, pbn_div,
7768                                                     true);
7769                 if (vcpi < 0)
7770                         return vcpi;
7771
7772                 dm_conn_state->pbn = pbn;
7773                 dm_conn_state->vcpi_slots = vcpi;
7774         }
7775         return 0;
7776 }
7777 #endif
7778
7779 static void dm_drm_plane_reset(struct drm_plane *plane)
7780 {
7781         struct dm_plane_state *amdgpu_state = NULL;
7782
7783         if (plane->state)
7784                 plane->funcs->atomic_destroy_state(plane, plane->state);
7785
7786         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7787         WARN_ON(amdgpu_state == NULL);
7788
7789         if (amdgpu_state)
7790                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7791 }
7792
7793 static struct drm_plane_state *
7794 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7795 {
7796         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7797
7798         old_dm_plane_state = to_dm_plane_state(plane->state);
7799         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7800         if (!dm_plane_state)
7801                 return NULL;
7802
7803         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7804
7805         if (old_dm_plane_state->dc_state) {
7806                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7807                 dc_plane_state_retain(dm_plane_state->dc_state);
7808         }
7809
7810         return &dm_plane_state->base;
7811 }
7812
7813 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7814                                 struct drm_plane_state *state)
7815 {
7816         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7817
7818         if (dm_plane_state->dc_state)
7819                 dc_plane_state_release(dm_plane_state->dc_state);
7820
7821         drm_atomic_helper_plane_destroy_state(plane, state);
7822 }
7823
7824 static const struct drm_plane_funcs dm_plane_funcs = {
7825         .update_plane   = drm_atomic_helper_update_plane,
7826         .disable_plane  = drm_atomic_helper_disable_plane,
7827         .destroy        = drm_primary_helper_destroy,
7828         .reset = dm_drm_plane_reset,
7829         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7830         .atomic_destroy_state = dm_drm_plane_destroy_state,
7831         .format_mod_supported = dm_plane_format_mod_supported,
7832 };
7833
7834 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7835                                       struct drm_plane_state *new_state)
7836 {
7837         struct amdgpu_framebuffer *afb;
7838         struct drm_gem_object *obj;
7839         struct amdgpu_device *adev;
7840         struct amdgpu_bo *rbo;
7841         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7842         uint32_t domain;
7843         int r;
7844
7845         if (!new_state->fb) {
7846                 DRM_DEBUG_KMS("No FB bound\n");
7847                 return 0;
7848         }
7849
7850         afb = to_amdgpu_framebuffer(new_state->fb);
7851         obj = new_state->fb->obj[0];
7852         rbo = gem_to_amdgpu_bo(obj);
7853         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7854
7855         r = amdgpu_bo_reserve(rbo, true);
7856         if (r) {
7857                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
7858                 return r;
7859         }
7860
7861         r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7862         if (r) {
7863                 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7864                 goto error_unlock;
7865         }
7866
7867         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7868                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7869         else
7870                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7871
7872         r = amdgpu_bo_pin(rbo, domain);
7873         if (unlikely(r != 0)) {
7874                 if (r != -ERESTARTSYS)
7875                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7876                 goto error_unlock;
7877         }
7878
7879         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7880         if (unlikely(r != 0)) {
7881                 DRM_ERROR("%p bind failed\n", rbo);
7882                 goto error_unpin;
7883         }
7884
7885         r = drm_gem_plane_helper_prepare_fb(plane, new_state);
7886         if (unlikely(r != 0))
7887                 goto error_unpin;
7888
7889         amdgpu_bo_unreserve(rbo);
7890
7891         afb->address = amdgpu_bo_gpu_offset(rbo);
7892
7893         amdgpu_bo_ref(rbo);
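             /*
              * This extra reference pairs with the amdgpu_bo_unref() in
              * dm_plane_helper_cleanup_fb(); it keeps the BO alive for as
              * long as the framebuffer stays bound to the plane.
              */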
7894
7895         /*
7896          * We don't do surface updates on planes that have been newly created,
7897          * but we also don't have the afb->address during atomic check.
7898          *
7899          * Fill in buffer attributes depending on the address here, but only on
7900          * newly created planes since they're not being used by DC yet and this
7901          * won't modify global state.
7902          */
7903         dm_plane_state_old = to_dm_plane_state(plane->state);
7904         dm_plane_state_new = to_dm_plane_state(new_state);
7905
7906         if (dm_plane_state_new->dc_state &&
7907             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7908                 struct dc_plane_state *plane_state =
7909                         dm_plane_state_new->dc_state;
7910                 bool force_disable_dcc = !plane_state->dcc.enable;
7911
7912                 fill_plane_buffer_attributes(
7913                         adev, afb, plane_state->format, plane_state->rotation,
7914                         afb->tiling_flags,
7915                         &plane_state->tiling_info, &plane_state->plane_size,
7916                         &plane_state->dcc, &plane_state->address,
7917                         afb->tmz_surface, force_disable_dcc);
7918         }
7919
7920         return 0;
7921
7922 error_unpin:
7923         amdgpu_bo_unpin(rbo);
7924
7925 error_unlock:
7926         amdgpu_bo_unreserve(rbo);
7927         return r;
7928 }
7929
7930 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7931                                        struct drm_plane_state *old_state)
7932 {
7933         struct amdgpu_bo *rbo;
7934         int r;
7935
7936         if (!old_state->fb)
7937                 return;
7938
7939         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7940         r = amdgpu_bo_reserve(rbo, false);
7941         if (unlikely(r)) {
7942                 DRM_ERROR("failed to reserve rbo before unpin\n");
7943                 return;
7944         }
7945
7946         amdgpu_bo_unpin(rbo);
7947         amdgpu_bo_unreserve(rbo);
7948         amdgpu_bo_unref(&rbo);
7949 }
7950
7951 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7952                                        struct drm_crtc_state *new_crtc_state)
7953 {
7954         struct drm_framebuffer *fb = state->fb;
7955         int min_downscale, max_upscale;
7956         int min_scale = 0;
7957         int max_scale = INT_MAX;
7958
7959         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7960         if (fb && state->crtc) {
7961                 /* Validate viewport to cover the case when only the position changes */
7962                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7963                         int viewport_width = state->crtc_w;
7964                         int viewport_height = state->crtc_h;
7965
7966                         if (state->crtc_x < 0)
7967                                 viewport_width += state->crtc_x;
7968                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7969                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7970
7971                         if (state->crtc_y < 0)
7972                                 viewport_height += state->crtc_y;
7973                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7974                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7975
7976                         if (viewport_width < 0 || viewport_height < 0) {
7977                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7978                                 return -EINVAL;
7979                         } else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* Width is doubled because of pipe split. */
7980                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
7981                                 return -EINVAL;
7982                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7983                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7984                                 return -EINVAL;
7985                         }
7986
7987                 }
7988
7989                 /* Get min/max allowed scaling factors from plane caps. */
7990                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7991                                              &min_downscale, &max_upscale);
7992                 /*
7993                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7994                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7995                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7996                  */
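                     /*
                      * Worked example: with max_upscale = 16000 (16x in DC
                      * terms), min_scale = (1000 << 16) / 16000 = 0x1000,
                      * i.e. 1/16 in 16.16 fixed point.
                      */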
7997                 min_scale = (1000 << 16) / max_upscale;
7998                 max_scale = (1000 << 16) / min_downscale;
7999         }
8000
8001         return drm_atomic_helper_check_plane_state(
8002                 state, new_crtc_state, min_scale, max_scale, true, true);
8003 }
8004
8005 static int dm_plane_atomic_check(struct drm_plane *plane,
8006                                  struct drm_atomic_state *state)
8007 {
8008         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
8009                                                                                  plane);
8010         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8011         struct dc *dc = adev->dm.dc;
8012         struct dm_plane_state *dm_plane_state;
8013         struct dc_scaling_info scaling_info;
8014         struct drm_crtc_state *new_crtc_state;
8015         int ret;
8016
8017         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
8018
8019         dm_plane_state = to_dm_plane_state(new_plane_state);
8020
8021         if (!dm_plane_state->dc_state)
8022                 return 0;
8023
8024         new_crtc_state =
8025                 drm_atomic_get_new_crtc_state(state,
8026                                               new_plane_state->crtc);
8027         if (!new_crtc_state)
8028                 return -EINVAL;
8029
8030         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8031         if (ret)
8032                 return ret;
8033
8034         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
8035         if (ret)
8036                 return ret;
8037
8038         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
8039                 return 0;
8040
8041         return -EINVAL;
8042 }
8043
8044 static int dm_plane_atomic_async_check(struct drm_plane *plane,
8045                                        struct drm_atomic_state *state)
8046 {
8047         /* Only support async updates on cursor planes. */
8048         if (plane->type != DRM_PLANE_TYPE_CURSOR)
8049                 return -EINVAL;
8050
8051         return 0;
8052 }
8053
8054 static void dm_plane_atomic_async_update(struct drm_plane *plane,
8055                                          struct drm_atomic_state *state)
8056 {
8057         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
8058                                                                            plane);
8059         struct drm_plane_state *old_state =
8060                 drm_atomic_get_old_plane_state(state, plane);
8061
8062         trace_amdgpu_dm_atomic_update_cursor(new_state);
8063
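             /*
              * Async path: update the committed plane state in place rather
              * than swapping in the new state, then program the cursor
              * directly via handle_cursor_update() below.
              */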
8064         swap(plane->state->fb, new_state->fb);
8065
8066         plane->state->src_x = new_state->src_x;
8067         plane->state->src_y = new_state->src_y;
8068         plane->state->src_w = new_state->src_w;
8069         plane->state->src_h = new_state->src_h;
8070         plane->state->crtc_x = new_state->crtc_x;
8071         plane->state->crtc_y = new_state->crtc_y;
8072         plane->state->crtc_w = new_state->crtc_w;
8073         plane->state->crtc_h = new_state->crtc_h;
8074
8075         handle_cursor_update(plane, old_state);
8076 }
8077
8078 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
8079         .prepare_fb = dm_plane_helper_prepare_fb,
8080         .cleanup_fb = dm_plane_helper_cleanup_fb,
8081         .atomic_check = dm_plane_atomic_check,
8082         .atomic_async_check = dm_plane_atomic_async_check,
8083         .atomic_async_update = dm_plane_atomic_async_update
8084 };
8085
8086 /*
8087  * TODO: These are currently initialized to RGB formats only.
8088  * For future use cases we should either initialize them dynamically based on
8089  * plane capabilities, or initialize this array to all formats, so the internal
8090  * drm check will succeed, and let DC implement the proper check.
8091  */
8092 static const uint32_t rgb_formats[] = {
8093         DRM_FORMAT_XRGB8888,
8094         DRM_FORMAT_ARGB8888,
8095         DRM_FORMAT_RGBA8888,
8096         DRM_FORMAT_XRGB2101010,
8097         DRM_FORMAT_XBGR2101010,
8098         DRM_FORMAT_ARGB2101010,
8099         DRM_FORMAT_ABGR2101010,
8100         DRM_FORMAT_XRGB16161616,
8101         DRM_FORMAT_XBGR16161616,
8102         DRM_FORMAT_ARGB16161616,
8103         DRM_FORMAT_ABGR16161616,
8104         DRM_FORMAT_XBGR8888,
8105         DRM_FORMAT_ABGR8888,
8106         DRM_FORMAT_RGB565,
8107 };
8108
8109 static const uint32_t overlay_formats[] = {
8110         DRM_FORMAT_XRGB8888,
8111         DRM_FORMAT_ARGB8888,
8112         DRM_FORMAT_RGBA8888,
8113         DRM_FORMAT_XBGR8888,
8114         DRM_FORMAT_ABGR8888,
8115         DRM_FORMAT_RGB565
8116 };
8117
8118 static const u32 cursor_formats[] = {
8119         DRM_FORMAT_ARGB8888
8120 };
8121
8122 static int get_plane_formats(const struct drm_plane *plane,
8123                              const struct dc_plane_cap *plane_cap,
8124                              uint32_t *formats, int max_formats)
8125 {
8126         int i, num_formats = 0;
8127
8128         /*
8129          * TODO: Query support for each group of formats directly from
8130          * DC plane caps. This will require adding more formats to the
8131          * caps list.
8132          */
8133
8134         switch (plane->type) {
8135         case DRM_PLANE_TYPE_PRIMARY:
8136                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
8137                         if (num_formats >= max_formats)
8138                                 break;
8139
8140                         formats[num_formats++] = rgb_formats[i];
8141                 }
8142
8143                 if (plane_cap && plane_cap->pixel_format_support.nv12)
8144                         formats[num_formats++] = DRM_FORMAT_NV12;
8145                 if (plane_cap && plane_cap->pixel_format_support.p010)
8146                         formats[num_formats++] = DRM_FORMAT_P010;
8147                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
8148                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
8149                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
8150                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
8151                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
8152                 }
8153                 break;
8154
8155         case DRM_PLANE_TYPE_OVERLAY:
8156                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
8157                         if (num_formats >= max_formats)
8158                                 break;
8159
8160                         formats[num_formats++] = overlay_formats[i];
8161                 }
8162                 break;
8163
8164         case DRM_PLANE_TYPE_CURSOR:
8165                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
8166                         if (num_formats >= max_formats)
8167                                 break;
8168
8169                         formats[num_formats++] = cursor_formats[i];
8170                 }
8171                 break;
8172         }
8173
8174         return num_formats;
8175 }
8176
8177 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
8178                                 struct drm_plane *plane,
8179                                 unsigned long possible_crtcs,
8180                                 const struct dc_plane_cap *plane_cap)
8181 {
8182         uint32_t formats[32];
8183         int num_formats;
8184         int res = -EPERM;
8185         unsigned int supported_rotations;
8186         uint64_t *modifiers = NULL;
8187
8188         num_formats = get_plane_formats(plane, plane_cap, formats,
8189                                         ARRAY_SIZE(formats));
8190
8191         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
8192         if (res)
8193                 return res;
8194
8195         if (modifiers == NULL)
8196                 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8197
8198         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
8199                                        &dm_plane_funcs, formats, num_formats,
8200                                        modifiers, plane->type, NULL);
8201         kfree(modifiers);
8202         if (res)
8203                 return res;
8204
8205         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8206             plane_cap && plane_cap->per_pixel_alpha) {
8207                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
8208                                           BIT(DRM_MODE_BLEND_PREMULTI) |
8209                                           BIT(DRM_MODE_BLEND_COVERAGE);
8210
8211                 drm_plane_create_alpha_property(plane);
8212                 drm_plane_create_blend_mode_property(plane, blend_caps);
8213         }
8214
8215         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
8216             plane_cap &&
8217             (plane_cap->pixel_format_support.nv12 ||
8218              plane_cap->pixel_format_support.p010)) {
8219                 /* This only affects YUV formats. */
8220                 drm_plane_create_color_properties(
8221                         plane,
8222                         BIT(DRM_COLOR_YCBCR_BT601) |
8223                         BIT(DRM_COLOR_YCBCR_BT709) |
8224                         BIT(DRM_COLOR_YCBCR_BT2020),
8225                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8226                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8227                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8228         }
8229
8230         supported_rotations =
8231                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8232                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8233
8234         if (dm->adev->asic_type >= CHIP_BONAIRE &&
8235             plane->type != DRM_PLANE_TYPE_CURSOR)
8236                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8237                                                    supported_rotations);
8238
8239         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8240
8241         /* Create (reset) the plane state */
8242         if (plane->funcs->reset)
8243                 plane->funcs->reset(plane);
8244
8245         return 0;
8246 }
8247
8248 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8249                                struct drm_plane *plane,
8250                                uint32_t crtc_index)
8251 {
8252         struct amdgpu_crtc *acrtc = NULL;
8253         struct drm_plane *cursor_plane;
8254
8255         int res = -ENOMEM;
8256
8257         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8258         if (!cursor_plane)
8259                 goto fail;
8260
8261         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8262         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
8263
8264         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8265         if (!acrtc)
8266                 goto fail;
8267
8268         res = drm_crtc_init_with_planes(
8269                         dm->ddev,
8270                         &acrtc->base,
8271                         plane,
8272                         cursor_plane,
8273                         &amdgpu_dm_crtc_funcs, NULL);
8274
8275         if (res)
8276                 goto fail;
8277
8278         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8279
8280         /* Create (reset) the crtc state */
8281         if (acrtc->base.funcs->reset)
8282                 acrtc->base.funcs->reset(&acrtc->base);
8283
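             /* DC reports a single square cursor cap; use it for both dimensions. */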
8284         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8285         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8286
8287         acrtc->crtc_id = crtc_index;
8288         acrtc->base.enabled = false;
8289         acrtc->otg_inst = -1;
8290
8291         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8292         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8293                                    true, MAX_COLOR_LUT_ENTRIES);
8294         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8295
8296         return 0;
8297
8298 fail:
8299         kfree(acrtc);
8300         kfree(cursor_plane);
8301         return res;
8302 }
8303
8305 static int to_drm_connector_type(enum signal_type st)
8306 {
8307         switch (st) {
8308         case SIGNAL_TYPE_HDMI_TYPE_A:
8309                 return DRM_MODE_CONNECTOR_HDMIA;
8310         case SIGNAL_TYPE_EDP:
8311                 return DRM_MODE_CONNECTOR_eDP;
8312         case SIGNAL_TYPE_LVDS:
8313                 return DRM_MODE_CONNECTOR_LVDS;
8314         case SIGNAL_TYPE_RGB:
8315                 return DRM_MODE_CONNECTOR_VGA;
8316         case SIGNAL_TYPE_DISPLAY_PORT:
8317         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8318                 return DRM_MODE_CONNECTOR_DisplayPort;
8319         case SIGNAL_TYPE_DVI_DUAL_LINK:
8320         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8321                 return DRM_MODE_CONNECTOR_DVID;
8322         case SIGNAL_TYPE_VIRTUAL:
8323                 return DRM_MODE_CONNECTOR_VIRTUAL;
8324
8325         default:
8326                 return DRM_MODE_CONNECTOR_Unknown;
8327         }
8328 }
8329
8330 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8331 {
8332         struct drm_encoder *encoder;
8333
8334         /* There is only one encoder per connector */
8335         drm_connector_for_each_possible_encoder(connector, encoder)
8336                 return encoder;
8337
8338         return NULL;
8339 }
8340
8341 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8342 {
8343         struct drm_encoder *encoder;
8344         struct amdgpu_encoder *amdgpu_encoder;
8345
8346         encoder = amdgpu_dm_connector_to_encoder(connector);
8347
8348         if (encoder == NULL)
8349                 return;
8350
8351         amdgpu_encoder = to_amdgpu_encoder(encoder);
8352
8353         amdgpu_encoder->native_mode.clock = 0;
8354
8355         if (!list_empty(&connector->probed_modes)) {
8356                 struct drm_display_mode *preferred_mode = NULL;
8357
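                     /*
                      * probed_modes is sorted, so only the first entry is
                      * examined; the loop below breaks unconditionally after
                      * checking it for the PREFERRED flag.
                      */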
8358                 list_for_each_entry(preferred_mode,
8359                                     &connector->probed_modes,
8360                                     head) {
8361                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8362                                 amdgpu_encoder->native_mode = *preferred_mode;
8363
8364                         break;
8365                 }
8366
8367         }
8368 }
8369
8370 static struct drm_display_mode *
8371 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8372                              char *name,
8373                              int hdisplay, int vdisplay)
8374 {
8375         struct drm_device *dev = encoder->dev;
8376         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8377         struct drm_display_mode *mode = NULL;
8378         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8379
8380         mode = drm_mode_duplicate(dev, native_mode);
8381
8382         if (mode == NULL)
8383                 return NULL;
8384
8385         mode->hdisplay = hdisplay;
8386         mode->vdisplay = vdisplay;
8387         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8388         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8389
8390         return mode;
8392 }
8393
8394 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8395                                                  struct drm_connector *connector)
8396 {
8397         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8398         struct drm_display_mode *mode = NULL;
8399         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8400         struct amdgpu_dm_connector *amdgpu_dm_connector =
8401                                 to_amdgpu_dm_connector(connector);
8402         int i;
8403         int n;
8404         struct mode_size {
8405                 char name[DRM_DISPLAY_MODE_LEN];
8406                 int w;
8407                 int h;
8408         } common_modes[] = {
8409                 {  "640x480",  640,  480},
8410                 {  "800x600",  800,  600},
8411                 { "1024x768", 1024,  768},
8412                 { "1280x720", 1280,  720},
8413                 { "1280x800", 1280,  800},
8414                 {"1280x1024", 1280, 1024},
8415                 { "1440x900", 1440,  900},
8416                 {"1680x1050", 1680, 1050},
8417                 {"1600x1200", 1600, 1200},
8418                 {"1920x1080", 1920, 1080},
8419                 {"1920x1200", 1920, 1200}
8420         };
8421
8422         n = ARRAY_SIZE(common_modes);
8423
8424         for (i = 0; i < n; i++) {
8425                 struct drm_display_mode *curmode = NULL;
8426                 bool mode_existed = false;
8427
8428                 if (common_modes[i].w > native_mode->hdisplay ||
8429                     common_modes[i].h > native_mode->vdisplay ||
8430                    (common_modes[i].w == native_mode->hdisplay &&
8431                     common_modes[i].h == native_mode->vdisplay))
8432                         continue;
8433
8434                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8435                         if (common_modes[i].w == curmode->hdisplay &&
8436                             common_modes[i].h == curmode->vdisplay) {
8437                                 mode_existed = true;
8438                                 break;
8439                         }
8440                 }
8441
8442                 if (mode_existed)
8443                         continue;
8444
8445                 mode = amdgpu_dm_create_common_mode(encoder,
8446                                 common_modes[i].name, common_modes[i].w,
8447                                 common_modes[i].h);
8448                 if (!mode)
8449                         continue;
8450
8451                 drm_mode_probed_add(connector, mode);
8452                 amdgpu_dm_connector->num_modes++;
8453         }
8454 }
8455
8456 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8457 {
8458         struct drm_encoder *encoder;
8459         struct amdgpu_encoder *amdgpu_encoder;
8460         const struct drm_display_mode *native_mode;
8461
8462         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8463             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8464                 return;
8465
8466         encoder = amdgpu_dm_connector_to_encoder(connector);
8467         if (!encoder)
8468                 return;
8469
8470         amdgpu_encoder = to_amdgpu_encoder(encoder);
8471
8472         native_mode = &amdgpu_encoder->native_mode;
8473         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8474                 return;
8475
8476         drm_connector_set_panel_orientation_with_quirk(connector,
8477                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8478                                                        native_mode->hdisplay,
8479                                                        native_mode->vdisplay);
8480 }
8481
8482 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8483                                               struct edid *edid)
8484 {
8485         struct amdgpu_dm_connector *amdgpu_dm_connector =
8486                         to_amdgpu_dm_connector(connector);
8487
8488         if (edid) {
8489                 /* empty probed_modes */
8490                 INIT_LIST_HEAD(&connector->probed_modes);
8491                 amdgpu_dm_connector->num_modes =
8492                                 drm_add_edid_modes(connector, edid);
8493
8494                 /* Sort the probed modes before calling
8495                  * amdgpu_dm_get_native_mode(), since an EDID can have
8496                  * more than one preferred mode. Modes later in the
8497                  * probed list may have a higher preferred resolution:
8498                  * for example, 3840x2160 in the base EDID preferred
8499                  * timing and 4096x2160 in a DID extension block.
8500                  */
8502                 drm_mode_sort(&connector->probed_modes);
8503                 amdgpu_dm_get_native_mode(connector);
8504
8505                 /* Freesync capabilities are reset by calling
8506                  * drm_add_edid_modes() and need to be
8507                  * restored here.
8508                  */
8509                 amdgpu_dm_update_freesync_caps(connector, edid);
8510
8511                 amdgpu_set_panel_orientation(connector);
8512         } else {
8513                 amdgpu_dm_connector->num_modes = 0;
8514         }
8515 }
8516
8517 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8518                               struct drm_display_mode *mode)
8519 {
8520         struct drm_display_mode *m;
8521
8522         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8523                 if (drm_mode_equal(m, mode))
8524                         return true;
8525         }
8526
8527         return false;
8528 }
8529
8530 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8531 {
8532         const struct drm_display_mode *m;
8533         struct drm_display_mode *new_mode;
8534         uint i;
8535         uint32_t new_modes_count = 0;
8536
8537         /* Standard FPS values
8538          *
8539          * 23.976       - TV/NTSC
8540          * 24           - Cinema
8541          * 25           - TV/PAL
8542          * 29.97        - TV/NTSC
8543          * 30           - TV/NTSC
8544          * 48           - Cinema HFR
8545          * 50           - TV/PAL
8546          * 60           - Commonly used
8547          * 48,72,96,120 - Multiples of 24
8548          */
8549         static const uint32_t common_rates[] = {
8550                 23976, 24000, 25000, 29970, 30000,
8551                 48000, 50000, 60000, 72000, 96000, 120000
8552         };
8553
8554         /*
8555          * Find the mode with the highest refresh rate at the same
8556          * resolution as the preferred mode; some monitors report a
8557          * preferred mode with a lower refresh rate than they support.
8558          */
8559
8560         m = get_highest_refresh_rate_mode(aconnector, true);
8561         if (!m)
8562                 return 0;
8563
8564         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8565                 uint64_t target_vtotal, target_vtotal_diff;
8566                 uint64_t num, den;
8567
8568                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8569                         continue;
8570
8571                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8572                     common_rates[i] > aconnector->max_vfreq * 1000)
8573                         continue;
8574
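                     /*
                      * Derive the vtotal that yields this rate at the mode's
                      * fixed pixel clock: refresh_mHz = clock_kHz * 10^6 /
                      * (htotal * vtotal), hence
                      * vtotal = clock_kHz * 10^6 / (rate_mHz * htotal).
                      */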
8575                 num = (unsigned long long)m->clock * 1000 * 1000;
8576                 den = common_rates[i] * (unsigned long long)m->htotal;
8577                 target_vtotal = div_u64(num, den);
8578                 target_vtotal_diff = target_vtotal - m->vtotal;
8579
8580                 /* Check for illegal modes */
8581                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8582                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8583                     m->vtotal + target_vtotal_diff < m->vsync_end)
8584                         continue;
8585
8586                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8587                 if (!new_mode)
8588                         goto out;
8589
8590                 new_mode->vtotal += (u16)target_vtotal_diff;
8591                 new_mode->vsync_start += (u16)target_vtotal_diff;
8592                 new_mode->vsync_end += (u16)target_vtotal_diff;
8593                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8594                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8595
8596                 if (!is_duplicate_mode(aconnector, new_mode)) {
8597                         drm_mode_probed_add(&aconnector->base, new_mode);
8598                         new_modes_count += 1;
8599                 } else {
8600                         drm_mode_destroy(aconnector->base.dev, new_mode);
                     }
8601         }
8602  out:
8603         return new_modes_count;
8604 }
8605
8606 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8607                                                    struct edid *edid)
8608 {
8609         struct amdgpu_dm_connector *amdgpu_dm_connector =
8610                 to_amdgpu_dm_connector(connector);
8611
8612         if (!edid)
8613                 return;
8614
8615         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8616                 amdgpu_dm_connector->num_modes +=
8617                         add_fs_modes(amdgpu_dm_connector);
8618 }
8619
8620 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8621 {
8622         struct amdgpu_dm_connector *amdgpu_dm_connector =
8623                         to_amdgpu_dm_connector(connector);
8624         struct drm_encoder *encoder;
8625         struct edid *edid = amdgpu_dm_connector->edid;
8626
8627         encoder = amdgpu_dm_connector_to_encoder(connector);
8628
8629         if (!drm_edid_is_valid(edid)) {
8630                 amdgpu_dm_connector->num_modes =
8631                                 drm_add_modes_noedid(connector, 640, 480);
8632         } else {
8633                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8634                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8635                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8636         }
8637         amdgpu_dm_fbc_init(connector);
8638
8639         return amdgpu_dm_connector->num_modes;
8640 }
8641
8642 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8643                                      struct amdgpu_dm_connector *aconnector,
8644                                      int connector_type,
8645                                      struct dc_link *link,
8646                                      int link_index)
8647 {
8648         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8649
8650         /*
8651          * Some of the properties below require access to state, like bpc.
8652          * Allocate some default initial connector state with our reset helper.
8653          */
8654         if (aconnector->base.funcs->reset)
8655                 aconnector->base.funcs->reset(&aconnector->base);
8656
8657         aconnector->connector_id = link_index;
8658         aconnector->dc_link = link;
8659         aconnector->base.interlace_allowed = false;
8660         aconnector->base.doublescan_allowed = false;
8661         aconnector->base.stereo_allowed = false;
8662         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8663         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8664         aconnector->audio_inst = -1;
8665         mutex_init(&aconnector->hpd_lock);
8666
8667         /*
8668          * Configure HPD hot plug support. connector->polled defaults to 0,
8669          * which means HPD hot plug is not supported.
8670          */
8671         switch (connector_type) {
8672         case DRM_MODE_CONNECTOR_HDMIA:
8673                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8674                 aconnector->base.ycbcr_420_allowed =
8675                         link->link_enc->features.hdmi_ycbcr420_supported;
8676                 break;
8677         case DRM_MODE_CONNECTOR_DisplayPort:
8678                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8679                 link->link_enc = link_enc_cfg_get_link_enc(link);
8680                 ASSERT(link->link_enc);
8681                 if (link->link_enc)
8682                         aconnector->base.ycbcr_420_allowed =
8683                         link->link_enc->features.dp_ycbcr420_supported;
8684                 break;
8685         case DRM_MODE_CONNECTOR_DVID:
8686                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8687                 break;
8688         default:
8689                 break;
8690         }
8691
8692         drm_object_attach_property(&aconnector->base.base,
8693                                 dm->ddev->mode_config.scaling_mode_property,
8694                                 DRM_MODE_SCALE_NONE);
8695
8696         drm_object_attach_property(&aconnector->base.base,
8697                                 adev->mode_info.underscan_property,
8698                                 UNDERSCAN_OFF);
8699         drm_object_attach_property(&aconnector->base.base,
8700                                 adev->mode_info.underscan_hborder_property,
8701                                 0);
8702         drm_object_attach_property(&aconnector->base.base,
8703                                 adev->mode_info.underscan_vborder_property,
8704                                 0);
8705
8706         if (!aconnector->mst_port)
8707                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8708
8709         /* max_bpc defaults to the max of the attached range, but we want 8 bpc for non-eDP connectors. */
8710         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8711         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8712
8713         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8714             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8715                 drm_object_attach_property(&aconnector->base.base,
8716                                 adev->mode_info.abm_level_property, 0);
8717         }
8718
8719         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8720             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8721             connector_type == DRM_MODE_CONNECTOR_eDP) {
8722                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8723
8724                 if (!aconnector->mst_port)
8725                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8726
8727 #ifdef CONFIG_DRM_AMD_DC_HDCP
8728                 if (adev->dm.hdcp_workqueue)
8729                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8730 #endif
8731         }
8732 }
8733
8734 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8735                               struct i2c_msg *msgs, int num)
8736 {
8737         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8738         struct ddc_service *ddc_service = i2c->ddc_service;
8739         struct i2c_command cmd;
8740         int i;
8741         int result = -EIO;
8742
8743         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8744
8745         if (!cmd.payloads)
8746                 return result;
8747
8748         cmd.number_of_payloads = num;
8749         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8750         cmd.speed = 100;
8751
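             /*
              * Mirror each i2c_msg into a DC payload; dc_submit_i2c() runs
              * the whole chain as one transaction on the link's DDC line at
              * cmd.speed kHz (100 kHz, the standard DDC rate).
              */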
8752         for (i = 0; i < num; i++) {
8753                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8754                 cmd.payloads[i].address = msgs[i].addr;
8755                 cmd.payloads[i].length = msgs[i].len;
8756                 cmd.payloads[i].data = msgs[i].buf;
8757         }
8758
8759         if (dc_submit_i2c(
8760                         ddc_service->ctx->dc,
8761                         ddc_service->link->link_index,
8762                         &cmd))
8763                 result = num;
8764
8765         kfree(cmd.payloads);
8766         return result;
8767 }
8768
8769 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8770 {
8771         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8772 }
8773
8774 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8775         .master_xfer = amdgpu_dm_i2c_xfer,
8776         .functionality = amdgpu_dm_i2c_func,
8777 };
8778
8779 static struct amdgpu_i2c_adapter *
8780 create_i2c(struct ddc_service *ddc_service,
8781            int link_index,
8782            int *res)
8783 {
8784         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8785         struct amdgpu_i2c_adapter *i2c;
8786
8787         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8788         if (!i2c)
8789                 return NULL;
8790         i2c->base.owner = THIS_MODULE;
8791         i2c->base.class = I2C_CLASS_DDC;
8792         i2c->base.dev.parent = &adev->pdev->dev;
8793         i2c->base.algo = &amdgpu_dm_i2c_algo;
8794         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8795         i2c_set_adapdata(&i2c->base, i2c);
8796         i2c->ddc_service = ddc_service;
8797
8798         return i2c;
8799 }
8800
8802 /*
8803  * Note: this function assumes that dc_link_detect() was called for the
8804  * dc_link which will be represented by this aconnector.
8805  */
8806 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8807                                     struct amdgpu_dm_connector *aconnector,
8808                                     uint32_t link_index,
8809                                     struct amdgpu_encoder *aencoder)
8810 {
8811         int res = 0;
8812         int connector_type;
8813         struct dc *dc = dm->dc;
8814         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8815         struct amdgpu_i2c_adapter *i2c;
8816
8817         link->priv = aconnector;
8818
8819         DRM_DEBUG_DRIVER("%s()\n", __func__);
8820
8821         i2c = create_i2c(link->ddc, link->link_index, &res);
8822         if (!i2c) {
8823                 DRM_ERROR("Failed to create i2c adapter data\n");
8824                 return -ENOMEM;
8825         }
8826
8827         aconnector->i2c = i2c;
8828         res = i2c_add_adapter(&i2c->base);
8829
8830         if (res) {
8831                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8832                 goto out_free;
8833         }
8834
8835         connector_type = to_drm_connector_type(link->connector_signal);
8836
8837         res = drm_connector_init_with_ddc(
8838                         dm->ddev,
8839                         &aconnector->base,
8840                         &amdgpu_dm_connector_funcs,
8841                         connector_type,
8842                         &i2c->base);
8843
8844         if (res) {
8845                 DRM_ERROR("connector_init failed\n");
8846                 aconnector->connector_id = -1;
8847                 goto out_free;
8848         }
8849
8850         drm_connector_helper_add(
8851                         &aconnector->base,
8852                         &amdgpu_dm_connector_helper_funcs);
8853
8854         amdgpu_dm_connector_init_helper(
8855                 dm,
8856                 aconnector,
8857                 connector_type,
8858                 link,
8859                 link_index);
8860
8861         drm_connector_attach_encoder(
8862                 &aconnector->base, &aencoder->base);
8863
8864         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8865                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8866                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8867
8868 out_free:
8869         if (res) {
8870                 kfree(i2c);
8871                 aconnector->i2c = NULL;
8872         }
8873         return res;
8874 }
8875
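     /*
      * DM encoders can drive any CRTC, so possible_crtcs is a contiguous
      * mask with one bit per CRTC, capped at the 6 CRTCs the hardware exposes.
      */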
8876 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8877 {
8878         switch (adev->mode_info.num_crtc) {
8879         case 1:
8880                 return 0x1;
8881         case 2:
8882                 return 0x3;
8883         case 3:
8884                 return 0x7;
8885         case 4:
8886                 return 0xf;
8887         case 5:
8888                 return 0x1f;
8889         case 6:
8890         default:
8891                 return 0x3f;
8892         }
8893 }
8894
8895 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8896                                   struct amdgpu_encoder *aencoder,
8897                                   uint32_t link_index)
8898 {
8899         struct amdgpu_device *adev = drm_to_adev(dev);
8900
8901         int res = drm_encoder_init(dev,
8902                                    &aencoder->base,
8903                                    &amdgpu_dm_encoder_funcs,
8904                                    DRM_MODE_ENCODER_TMDS,
8905                                    NULL);
8906
8907         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8908
8909         if (!res)
8910                 aencoder->encoder_id = link_index;
8911         else
8912                 aencoder->encoder_id = -1;
8913
8914         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8915
8916         return res;
8917 }
8918
8919 static void manage_dm_interrupts(struct amdgpu_device *adev,
8920                                  struct amdgpu_crtc *acrtc,
8921                                  bool enable)
8922 {
8923         /*
8924          * We have no guarantee that the frontend index maps to the same
8925          * backend index - some even map to more than one.
8926          *
8927          * TODO: Use a different interrupt or check DC itself for the mapping.
8928          */
8929         int irq_type =
8930                 amdgpu_display_crtc_idx_to_irq_type(
8931                         adev,
8932                         acrtc->crtc_id);
8933
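             /*
              * Keep enable and disable symmetric: vblank is turned on before
              * the pageflip (and vline0) IRQs are taken, and the IRQs are
              * released in reverse order before vblank is turned off.
              */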
8934         if (enable) {
8935                 drm_crtc_vblank_on(&acrtc->base);
8936                 amdgpu_irq_get(
8937                         adev,
8938                         &adev->pageflip_irq,
8939                         irq_type);
8940 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8941                 amdgpu_irq_get(
8942                         adev,
8943                         &adev->vline0_irq,
8944                         irq_type);
8945 #endif
8946         } else {
8947 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8948                 amdgpu_irq_put(
8949                         adev,
8950                         &adev->vline0_irq,
8951                         irq_type);
8952 #endif
8953                 amdgpu_irq_put(
8954                         adev,
8955                         &adev->pageflip_irq,
8956                         irq_type);
8957                 drm_crtc_vblank_off(&acrtc->base);
8958         }
8959 }
8960
8961 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8962                                       struct amdgpu_crtc *acrtc)
8963 {
8964         int irq_type =
8965                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8966
8967         /*
8968          * This reads the current state for the IRQ and force-reapplies
8969          * the setting to hardware.
8970          */
8971         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8972 }
8973
8974 static bool
8975 is_scaling_state_different(const struct dm_connector_state *dm_state,
8976                            const struct dm_connector_state *old_dm_state)
8977 {
8978         if (dm_state->scaling != old_dm_state->scaling)
8979                 return true;
8980         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8981                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8982                         return true;
8983         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8984                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8985                         return true;
8986         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8987                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8988                 return true;
8989         return false;
8990 }
8991
8992 #ifdef CONFIG_DRM_AMD_DC_HDCP
8993 static bool is_content_protection_different(struct drm_connector_state *state,
8994                                             const struct drm_connector_state *old_state,
8995                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8996 {
8997         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8998         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8999
9000         /* Handle: Type0/1 change */
9001         if (old_state->hdcp_content_type != state->hdcp_content_type &&
9002             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
9003                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9004                 return true;
9005         }
9006
9007         /* CP is being re-enabled; ignore this transition.
9008          *
9009          * Handles:     ENABLED -> DESIRED
9010          */
9011         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
9012             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9013                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
9014                 return false;
9015         }
9016
9017         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
9018          *
9019          * Handles:     UNDESIRED -> ENABLED
9020          */
9021         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
9022             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
9023                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9024
9025         /* Stream removed and re-enabled
9026          *
9027          * Can sometimes overlap with the HPD case,
9028          * thus set update_hdcp to false to avoid
9029          * setting HDCP multiple times.
9030          *
9031          * Handles:     DESIRED -> DESIRED (Special case)
9032          */
9033         if (!(old_state->crtc && old_state->crtc->enabled) &&
9034                 state->crtc && state->crtc->enabled &&
9035                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9036                 dm_con_state->update_hdcp = false;
9037                 return true;
9038         }
9039
9040         /* Hot-plug, headless s3, dpms
9041          *
9042          * Only start HDCP if the display is connected/enabled.
9043          * update_hdcp flag will be set to false until the next
9044          * HPD comes in.
9045          *
9046          * Handles:     DESIRED -> DESIRED (Special case)
9047          */
9048         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
9049             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
9050                 dm_con_state->update_hdcp = false;
9051                 return true;
9052         }
9053
9054         /*
9055          * Handles:     UNDESIRED -> UNDESIRED
9056          *              DESIRED -> DESIRED
9057          *              ENABLED -> ENABLED
9058          */
9059         if (old_state->content_protection == state->content_protection)
9060                 return false;
9061
9062         /*
9063          * Handles:     UNDESIRED -> DESIRED
9064          *              DESIRED -> UNDESIRED
9065          *              ENABLED -> UNDESIRED
9066          */
9067         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
9068                 return true;
9069
9070         /*
9071          * Handles:     DESIRED -> ENABLED
9072          */
9073         return false;
9074 }
9075 #endif

9077 static void remove_stream(struct amdgpu_device *adev,
9078                           struct amdgpu_crtc *acrtc,
9079                           struct dc_stream_state *stream)
9080 {
9081         /* Stream removal path: detach the CRTC from its OTG and mark it disabled. */
9082
9083         acrtc->otg_inst = -1;
9084         acrtc->enabled = false;
9085 }
9086
9087 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
9088                                struct dc_cursor_position *position)
9089 {
9090         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9091         int x, y;
9092         int xorigin = 0, yorigin = 0;
9093
9094         if (!crtc || !plane->state->fb)
9095                 return 0;
9096
9097         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
9098             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
9099                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
9100                           __func__,
9101                           plane->state->crtc_w,
9102                           plane->state->crtc_h);
9103                 return -EINVAL;
9104         }
9105
9106         x = plane->state->crtc_x;
9107         y = plane->state->crtc_y;
9108
9109         if (x <= -amdgpu_crtc->max_cursor_width ||
9110             y <= -amdgpu_crtc->max_cursor_height)
9111                 return 0;
9112
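             /*
              * For a cursor partially off the top/left edge, clamp the
              * position to 0 and shift the hotspot instead, so DC scans out
              * only the visible portion.
              */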
9113         if (x < 0) {
9114                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
9115                 x = 0;
9116         }
9117         if (y < 0) {
9118                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
9119                 y = 0;
9120         }
9121         position->enable = true;
9122         position->translate_by_source = true;
9123         position->x = x;
9124         position->y = y;
9125         position->x_hotspot = xorigin;
9126         position->y_hotspot = yorigin;
9127
9128         return 0;
9129 }
9130
9131 static void handle_cursor_update(struct drm_plane *plane,
9132                                  struct drm_plane_state *old_plane_state)
9133 {
9134         struct amdgpu_device *adev = drm_to_adev(plane->dev);
9135         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9136         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9137         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9138         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9139         uint64_t address = afb ? afb->address : 0;
9140         struct dc_cursor_position position = {0};
9141         struct dc_cursor_attributes attributes;
9142         int ret;
9143
9144         if (!plane->state->fb && !old_plane_state->fb)
9145                 return;
9146
9147         DC_LOG_CURSOR("%s: crtc_id=%d with cursor size %dx%d\n",
9148                       __func__,
9149                       amdgpu_crtc->crtc_id,
9150                       plane->state->crtc_w,
9151                       plane->state->crtc_h);
9152
9153         ret = get_cursor_position(plane, crtc, &position);
9154         if (ret)
9155                 return;
9156
9157         if (!position.enable) {
9158                 /* turn off cursor */
9159                 if (crtc_state && crtc_state->stream) {
9160                         mutex_lock(&adev->dm.dc_lock);
9161                         dc_stream_set_cursor_position(crtc_state->stream,
9162                                                       &position);
9163                         mutex_unlock(&adev->dm.dc_lock);
9164                 }
9165                 return;
9166         }
9167
9168         amdgpu_crtc->cursor_width = plane->state->crtc_w;
9169         amdgpu_crtc->cursor_height = plane->state->crtc_h;
9170
9171         memset(&attributes, 0, sizeof(attributes));
9172         attributes.address.high_part = upper_32_bits(address);
9173         attributes.address.low_part  = lower_32_bits(address);
9174         attributes.width             = plane->state->crtc_w;
9175         attributes.height            = plane->state->crtc_h;
9176         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9177         attributes.rotation_angle    = 0;
9178         attributes.attribute_flags.value = 0;
9179
9180         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
9181
9182         if (crtc_state->stream) {
9183                 mutex_lock(&adev->dm.dc_lock);
9184                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9185                                                          &attributes))
9186                         DRM_ERROR("DC failed to set cursor attributes\n");
9187
9188                 if (!dc_stream_set_cursor_position(crtc_state->stream,
9189                                                    &position))
9190                         DRM_ERROR("DC failed to set cursor position\n");
9191                 mutex_unlock(&adev->dm.dc_lock);
9192         }
9193 }
9194
9195 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9196 {
9198         assert_spin_locked(&acrtc->base.dev->event_lock);
9199         WARN_ON(acrtc->event);
9200
9201         acrtc->event = acrtc->base.state->event;
9202
9203         /* Set the flip status */
9204         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9205
9206         /* Mark this event as consumed */
9207         acrtc->base.state->event = NULL;
9208
9209         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9210                      acrtc->crtc_id);
9211 }
9212
9213 static void update_freesync_state_on_stream(
9214         struct amdgpu_display_manager *dm,
9215         struct dm_crtc_state *new_crtc_state,
9216         struct dc_stream_state *new_stream,
9217         struct dc_plane_state *surface,
9218         u32 flip_timestamp_in_us)
9219 {
9220         struct mod_vrr_params vrr_params;
9221         struct dc_info_packet vrr_infopacket = {0};
9222         struct amdgpu_device *adev = dm->adev;
9223         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9224         unsigned long flags;
9225         bool pack_sdp_v1_3 = false;
9226
9227         if (!new_stream)
9228                 return;
9229
9230         /*
9231          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9232          * For now it's sufficient to just guard against these conditions.
9233          */
9234
9235         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9236                 return;
9237
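             /*
              * vrr_params lives in dm_irq_params and is also accessed from
              * the vblank interrupt path, so hold event_lock while it is
              * read and updated here.
              */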
9238         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9239         vrr_params = acrtc->dm_irq_params.vrr_params;
9240
9241         if (surface) {
9242                 mod_freesync_handle_preflip(
9243                         dm->freesync_module,
9244                         surface,
9245                         new_stream,
9246                         flip_timestamp_in_us,
9247                         &vrr_params);
9248
9249                 if (adev->family < AMDGPU_FAMILY_AI &&
9250                     amdgpu_dm_vrr_active(new_crtc_state)) {
9251                         mod_freesync_handle_v_update(dm->freesync_module,
9252                                                      new_stream, &vrr_params);
9253
9254                         /* Need to call this before the frame ends. */
9255                         dc_stream_adjust_vmin_vmax(dm->dc,
9256                                                    new_crtc_state->stream,
9257                                                    &vrr_params.adjust);
9258                 }
9259         }
9260
9261         mod_freesync_build_vrr_infopacket(
9262                 dm->freesync_module,
9263                 new_stream,
9264                 &vrr_params,
9265                 PACKET_TYPE_VRR,
9266                 TRANSFER_FUNC_UNKNOWN,
9267                 &vrr_infopacket,
9268                 pack_sdp_v1_3);
9269
9270         new_crtc_state->freesync_timing_changed |=
9271                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9272                         &vrr_params.adjust,
9273                         sizeof(vrr_params.adjust)) != 0);
9274
9275         new_crtc_state->freesync_vrr_info_changed |=
9276                 (memcmp(&new_crtc_state->vrr_infopacket,
9277                         &vrr_infopacket,
9278                         sizeof(vrr_infopacket)) != 0);
9279
9280         acrtc->dm_irq_params.vrr_params = vrr_params;
9281         new_crtc_state->vrr_infopacket = vrr_infopacket;
9282
9283         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9284         new_stream->vrr_infopacket = vrr_infopacket;
9285
9286         if (new_crtc_state->freesync_vrr_info_changed)
9287                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
9288                               new_crtc_state->base.crtc->base.id,
9289                               (int)new_crtc_state->base.vrr_enabled,
9290                               (int)vrr_params.state);
9291
9292         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9293 }
9294
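     /*
      * Recompute the VRR parameters from the new freesync config and mirror
      * them, along with the active plane count, into dm_irq_params under the
      * event_lock so the IRQ handlers see a consistent snapshot.
      */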
9295 static void update_stream_irq_parameters(
9296         struct amdgpu_display_manager *dm,
9297         struct dm_crtc_state *new_crtc_state)
9298 {
9299         struct dc_stream_state *new_stream = new_crtc_state->stream;
9300         struct mod_vrr_params vrr_params;
9301         struct mod_freesync_config config = new_crtc_state->freesync_config;
9302         struct amdgpu_device *adev = dm->adev;
9303         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9304         unsigned long flags;
9305
9306         if (!new_stream)
9307                 return;
9308
9309         /*
9310          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9311          * For now it's sufficient to just guard against these conditions.
9312          */
9313         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9314                 return;
9315
9316         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9317         vrr_params = acrtc->dm_irq_params.vrr_params;
9318
9319         if (new_crtc_state->vrr_supported &&
9320             config.min_refresh_in_uhz &&
9321             config.max_refresh_in_uhz) {
9322                 /*
9323                  * If a freesync compatible mode was set, config.state will have
9324                  * been set in atomic check.
9325                  */
9326                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9327                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9328                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9329                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9330                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9331                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9332                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9333                 } else {
9334                         config.state = new_crtc_state->base.vrr_enabled ?
9335                                                      VRR_STATE_ACTIVE_VARIABLE :
9336                                                      VRR_STATE_INACTIVE;
9337                 }
9338         } else {
9339                 config.state = VRR_STATE_UNSUPPORTED;
9340         }
9341
9342         mod_freesync_build_vrr_params(dm->freesync_module,
9343                                       new_stream,
9344                                       &config, &vrr_params);
9345
9346         new_crtc_state->freesync_timing_changed |=
9347                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9348                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9349
9350         new_crtc_state->freesync_config = config;
9351         /* Copy state for access from DM IRQ handler */
9352         acrtc->dm_irq_params.freesync_config = config;
9353         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9354         acrtc->dm_irq_params.vrr_params = vrr_params;
9355         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9356 }
9357
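     /*
      * Take or drop a vblank reference (plus the matching vupdate irq) across
      * VRR off->on and on->off transitions, so vblank interrupts stay enabled
      * for as long as VRR is active.
      */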
9358 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9359                                             struct dm_crtc_state *new_state)
9360 {
9361         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9362         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9363
9364         if (!old_vrr_active && new_vrr_active) {
9365                 /* Transition VRR inactive -> active:
9366                  * While VRR is active, we must not disable vblank irq, as a
9367                  * reenable after disable would compute bogus vblank/pflip
9368                  * timestamps if the reenable happens inside the display front porch.
9369                  *
9370                  * We also need vupdate irq for the actual core vblank handling
9371                  * at end of vblank.
9372                  */
9373                 dm_set_vupdate_irq(new_state->base.crtc, true);
9374                 drm_crtc_vblank_get(new_state->base.crtc);
9375                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9376                                  __func__, new_state->base.crtc->base.id);
9377         } else if (old_vrr_active && !new_vrr_active) {
9378                 /* Transition VRR active -> inactive:
9379                  * Allow vblank irq disable again for fixed refresh rate.
9380                  */
9381                 dm_set_vupdate_irq(new_state->base.crtc, false);
9382                 drm_crtc_vblank_put(new_state->base.crtc);
9383                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9384                                  __func__, new_state->base.crtc->base.id);
9385         }
9386 }
9387
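     /*
      * Push cursor updates for every cursor plane in @state. Called from
      * amdgpu_dm_commit_planes() before disabling all planes and again after
      * programming them.
      */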
9388 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9389 {
9390         struct drm_plane *plane;
9391         struct drm_plane_state *old_plane_state;
9392         int i;
9393
9394         /*
9395          * TODO: Make this per-stream so we don't issue redundant updates for
9396          * commits with multiple streams.
9397          */
9398         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9399                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9400                         handle_cursor_update(plane, old_plane_state);
9401 }
9402
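     /*
      * Program all plane and stream updates for @pcrtc in a single DC call.
      * The update bundle is heap-allocated since its MAX_SURFACES worth of
      * surface/plane/scaling/flip state is likely too large for the kernel
      * stack.
      */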
9403 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9404                                     struct dc_state *dc_state,
9405                                     struct drm_device *dev,
9406                                     struct amdgpu_display_manager *dm,
9407                                     struct drm_crtc *pcrtc,
9408                                     bool wait_for_vblank)
9409 {
9410         uint32_t i;
9411         uint64_t timestamp_ns;
9412         struct drm_plane *plane;
9413         struct drm_plane_state *old_plane_state, *new_plane_state;
9414         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9415         struct drm_crtc_state *new_pcrtc_state =
9416                         drm_atomic_get_new_crtc_state(state, pcrtc);
9417         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9418         struct dm_crtc_state *dm_old_crtc_state =
9419                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9420         int planes_count = 0, vpos, hpos;
9421         unsigned long flags;
9422         uint32_t target_vblank, last_flip_vblank;
9423         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9424         bool cursor_update = false;
9425         bool pflip_present = false;
9426         struct {
9427                 struct dc_surface_update surface_updates[MAX_SURFACES];
9428                 struct dc_plane_info plane_infos[MAX_SURFACES];
9429                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9430                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9431                 struct dc_stream_update stream_update;
9432         } *bundle;
9433
9434         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9435
9436         if (!bundle) {
9437                 dm_error("Failed to allocate update bundle\n");
9438                 goto cleanup;
9439         }
9440
9441         /*
9442          * Disable the cursor first if we're disabling all the planes.
9443          * It'll remain on the screen after the planes are re-enabled
9444          * if we don't.
9445          */
9446         if (acrtc_state->active_planes == 0)
9447                 amdgpu_dm_commit_cursors(state);
9448
9449         /* update planes when needed */
9450         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9451                 struct drm_crtc *crtc = new_plane_state->crtc;
9452                 struct drm_crtc_state *new_crtc_state;
9453                 struct drm_framebuffer *fb = new_plane_state->fb;
9454                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9455                 bool plane_needs_flip;
9456                 struct dc_plane_state *dc_plane;
9457                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9458
9459                 /* Cursor plane is handled after stream updates */
9460                 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9461                         if ((fb && crtc == pcrtc) ||
9462                             (old_plane_state->fb && old_plane_state->crtc == pcrtc))
9463                                 cursor_update = true;
9464
9465                         continue;
9466                 }
9467
9468                 if (!fb || !crtc || pcrtc != crtc)
9469                         continue;
9470
9471                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9472                 if (!new_crtc_state->active)
9473                         continue;
9474
9475                 dc_plane = dm_new_plane_state->dc_state;
9476
9477                 bundle->surface_updates[planes_count].surface = dc_plane;
9478                 if (new_pcrtc_state->color_mgmt_changed) {
9479                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9480                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9481                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9482                 }
9483
9484                 fill_dc_scaling_info(dm->adev, new_plane_state,
9485                                      &bundle->scaling_infos[planes_count]);
9486
9487                 bundle->surface_updates[planes_count].scaling_info =
9488                         &bundle->scaling_infos[planes_count];
9489
9490                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9491
9492                 pflip_present = pflip_present || plane_needs_flip;
9493
9494                 if (!plane_needs_flip) {
9495                         planes_count += 1;
9496                         continue;
9497                 }
9498
9499                 fill_dc_plane_info_and_addr(
9500                         dm->adev, new_plane_state,
9501                         afb->tiling_flags,
9502                         &bundle->plane_infos[planes_count],
9503                         &bundle->flip_addrs[planes_count].address,
9504                         afb->tmz_surface, false);
9505
9506                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9507                                  new_plane_state->plane->index,
9508                                  bundle->plane_infos[planes_count].dcc.enable);
9509
9510                 bundle->surface_updates[planes_count].plane_info =
9511                         &bundle->plane_infos[planes_count];
9512
9513                 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
9514                                     new_crtc_state,
9515                                     &bundle->flip_addrs[planes_count]);
9516
9517                 /*
9518                  * Only allow immediate flips for fast updates that don't
9519                  * change FB pitch, DCC state, rotation or mirroring.
9520                  */
9521                 bundle->flip_addrs[planes_count].flip_immediate =
9522                         crtc->state->async_flip &&
9523                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9524
9525                 timestamp_ns = ktime_get_ns();
9526                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9527                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9528                 bundle->surface_updates[planes_count].surface = dc_plane;
9529
9530                 if (!bundle->surface_updates[planes_count].surface) {
9531                         DRM_ERROR("No surface for CRTC: id=%d\n",
9532                                         acrtc_attach->crtc_id);
9533                         continue;
9534                 }
9535
9536                 if (plane == pcrtc->primary)
9537                         update_freesync_state_on_stream(
9538                                 dm,
9539                                 acrtc_state,
9540                                 acrtc_state->stream,
9541                                 dc_plane,
9542                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9543
9544                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9545                                  __func__,
9546                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9547                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9548
9549                 planes_count += 1;
9550
9551         }
9552
9553         if (pflip_present) {
9554                 if (!vrr_active) {
9555                         /* Use old throttling in non-vrr fixed refresh rate mode
9556                          * to keep flip scheduling based on target vblank counts
9557                          * working in a backwards compatible way, e.g., for
9558                          * clients using the GLX_OML_sync_control extension or
9559                          * DRI3/Present extension with defined target_msc.
9560                          */
9561                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9562                 } else {
9564                         /* For variable refresh rate mode only:
9565                          * Get vblank of last completed flip to avoid > 1 vrr
9566                          * flips per video frame by use of throttling, but allow
9567                          * flip programming anywhere in the possibly large
9568                          * variable vrr vblank interval for fine-grained flip
9569                          * timing control and more opportunity to avoid stutter
9570                          * on late submission of flips.
9571                          */
9572                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9573                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9574                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9575                 }
9576
9577                 target_vblank = last_flip_vblank + wait_for_vblank;
9578
9579                 /*
9580                  * Wait until we're out of the vertical blank period before the one
9581                  * targeted by the flip
9582                  */
9583                 while ((acrtc_attach->enabled &&
9584                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9585                                                             0, &vpos, &hpos, NULL,
9586                                                             NULL, &pcrtc->hwmode)
9587                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9588                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9589                         (int)(target_vblank -
9590                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9591                         usleep_range(1000, 1100);
9592                 }
9593
9594                 /*
9595                  * Prepare the flip event for the pageflip interrupt to handle.
9596                  *
9597                  * This only works in the case where we've already turned on the
9598                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
9599                  * from 0 -> n planes we have to skip a hardware generated event
9600                  * and rely on sending it from software.
9601                  */
9602                 if (acrtc_attach->base.state->event &&
9603                     acrtc_state->active_planes > 0) {
9604                         drm_crtc_vblank_get(pcrtc);
9605
9606                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9607
9608                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9609                         prepare_flip_isr(acrtc_attach);
9610
9611                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9612                 }
9613
9614                 if (acrtc_state->stream) {
9615                         if (acrtc_state->freesync_vrr_info_changed)
9616                                 bundle->stream_update.vrr_infopacket =
9617                                         &acrtc_state->stream->vrr_infopacket;
9618                 }
9619         } else if (cursor_update && acrtc_state->active_planes > 0 &&
9620                    acrtc_attach->base.state->event) {
9621                 drm_crtc_vblank_get(pcrtc);
9622
9623                 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9624
9625                 acrtc_attach->event = acrtc_attach->base.state->event;
9626                 acrtc_attach->base.state->event = NULL;
9627
9628                 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9629         }
9630
9631         /* Update the planes if changed or disable if we don't have any. */
9632         if ((planes_count || acrtc_state->active_planes == 0) &&
9633                 acrtc_state->stream) {
9634                 /*
9635                  * If PSR or idle optimizations are enabled then flush out
9636                  * any pending work before hardware programming.
9637                  */
9638                 if (dm->vblank_control_workqueue)
9639                         flush_workqueue(dm->vblank_control_workqueue);
9640
9641                 bundle->stream_update.stream = acrtc_state->stream;
9642                 if (new_pcrtc_state->mode_changed) {
9643                         bundle->stream_update.src = acrtc_state->stream->src;
9644                         bundle->stream_update.dst = acrtc_state->stream->dst;
9645                 }
9646
9647                 if (new_pcrtc_state->color_mgmt_changed) {
9648                         /*
9649                          * TODO: This isn't fully correct since we've actually
9650                          * already modified the stream in place.
9651                          */
9652                         bundle->stream_update.gamut_remap =
9653                                 &acrtc_state->stream->gamut_remap_matrix;
9654                         bundle->stream_update.output_csc_transform =
9655                                 &acrtc_state->stream->csc_color_matrix;
9656                         bundle->stream_update.out_transfer_func =
9657                                 acrtc_state->stream->out_transfer_func;
9658                 }
9659
9660                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9661                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9662                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9663
9664                 /*
9665                  * If FreeSync state on the stream has changed then we need to
9666                  * re-adjust the min/max bounds now that DC doesn't handle this
9667                  * as part of commit.
9668                  */
9669                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9670                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9671                         dc_stream_adjust_vmin_vmax(
9672                                 dm->dc, acrtc_state->stream,
9673                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9674                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9675                 }
9676                 mutex_lock(&dm->dc_lock);
9677                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9678                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9679                         amdgpu_dm_psr_disable(acrtc_state->stream);
9680
9681                 dc_commit_updates_for_stream(dm->dc,
9682                                                      bundle->surface_updates,
9683                                                      planes_count,
9684                                                      acrtc_state->stream,
9685                                                      &bundle->stream_update,
9686                                                      dc_state);
9687
9688                 /**
9689                  * Enable or disable the interrupts on the backend.
9690                  *
9691                  * Most pipes are put into power gating when unused.
9692                  *
9693                  * When power gating is enabled on a pipe we lose the
9694                  * interrupt enablement state when power gating is disabled.
9695                  *
9696                  * So we need to update the IRQ control state in hardware
9697                  * whenever the pipe turns on (since it could be previously
9698                  * power gated) or off (since some pipes can't be power gated
9699                  * on some ASICs).
9700                  */
9701                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9702                         dm_update_pflip_irq_state(drm_to_adev(dev),
9703                                                   acrtc_attach);
9704
9705                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9706                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9707                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9708                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9709
9710                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9711                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9712                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9713                         struct amdgpu_dm_connector *aconn =
9714                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9715
9716                         if (aconn->psr_skip_count > 0)
9717                                 aconn->psr_skip_count--;
9718
9719                         /* Allow PSR when skip count is 0. */
9720                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9721
9722                         /*
9723                          * If sink supports PSR SU, there is no need to rely on
9724                          * a vblank event disable request to enable PSR. PSR SU
9725                          * can be enabled immediately once OS demonstrates an
9726                          * adequate number of fast atomic commits to notify KMD
9727                          * of update events. See `vblank_control_worker()`.
9728                          */
9729                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9730                             acrtc_attach->dm_irq_params.allow_psr_entry &&
9731                             !acrtc_state->stream->link->psr_settings.psr_allow_active)
9732                                 amdgpu_dm_psr_enable(acrtc_state->stream);
9733                 } else {
9734                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9735                 }
9736
9737                 mutex_unlock(&dm->dc_lock);
9738         }
9739
9740         /*
9741          * Update cursor state *after* programming all the planes.
9742          * This avoids redundant programming in the case where we're going
9743          * to be disabling a single plane, since those pipes are being disabled.
9744          */
9745         if (acrtc_state->active_planes)
9746                 amdgpu_dm_commit_cursors(state);
9747
9748 cleanup:
9749         kfree(bundle);
9750 }
9751
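     /*
      * Notify the audio driver of ELD changes: first signal removals for
      * connectors whose CRTC went away or changed, then signal additions for
      * connectors that gained an active stream. audio_lock serializes the
      * updates to aconnector->audio_inst.
      */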
9752 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9753                                    struct drm_atomic_state *state)
9754 {
9755         struct amdgpu_device *adev = drm_to_adev(dev);
9756         struct amdgpu_dm_connector *aconnector;
9757         struct drm_connector *connector;
9758         struct drm_connector_state *old_con_state, *new_con_state;
9759         struct drm_crtc_state *new_crtc_state;
9760         struct dm_crtc_state *new_dm_crtc_state;
9761         const struct dc_stream_status *status;
9762         int i, inst;
9763
9764         /* Notify device removals. */
9765         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9766                 if (old_con_state->crtc != new_con_state->crtc) {
9767                         /* CRTC changes require notification. */
9768                         goto notify;
9769                 }
9770
9771                 if (!new_con_state->crtc)
9772                         continue;
9773
9774                 new_crtc_state = drm_atomic_get_new_crtc_state(
9775                         state, new_con_state->crtc);
9776
9777                 if (!new_crtc_state)
9778                         continue;
9779
9780                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9781                         continue;
9782
9783         notify:
9784                 aconnector = to_amdgpu_dm_connector(connector);
9785
9786                 mutex_lock(&adev->dm.audio_lock);
9787                 inst = aconnector->audio_inst;
9788                 aconnector->audio_inst = -1;
9789                 mutex_unlock(&adev->dm.audio_lock);
9790
9791                 amdgpu_dm_audio_eld_notify(adev, inst);
9792         }
9793
9794         /* Notify audio device additions. */
9795         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9796                 if (!new_con_state->crtc)
9797                         continue;
9798
9799                 new_crtc_state = drm_atomic_get_new_crtc_state(
9800                         state, new_con_state->crtc);
9801
9802                 if (!new_crtc_state)
9803                         continue;
9804
9805                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9806                         continue;
9807
9808                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9809                 if (!new_dm_crtc_state->stream)
9810                         continue;
9811
9812                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9813                 if (!status)
9814                         continue;
9815
9816                 aconnector = to_amdgpu_dm_connector(connector);
9817
9818                 mutex_lock(&adev->dm.audio_lock);
9819                 inst = status->audio_inst;
9820                 aconnector->audio_inst = inst;
9821                 mutex_unlock(&adev->dm.audio_lock);
9822
9823                 amdgpu_dm_audio_eld_notify(adev, inst);
9824         }
9825 }
9826
9827 /*
9828  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9829  * @crtc_state: the DRM CRTC state
9830  * @stream_state: the DC stream state.
9831  *
9832  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9833  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9834  */
9835 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9836                                                 struct dc_stream_state *stream_state)
9837 {
9838         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9839 }
9840
9841 /**
9842  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9843  * @state: The atomic state to commit
9844  *
9845  * This will tell DC to commit the constructed DC state from atomic_check,
9846  * programming the hardware. Any failure here implies a hardware failure, since
9847  * atomic check should have filtered anything non-kosher.
9848  */
9849 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9850 {
9851         struct drm_device *dev = state->dev;
9852         struct amdgpu_device *adev = drm_to_adev(dev);
9853         struct amdgpu_display_manager *dm = &adev->dm;
9854         struct dm_atomic_state *dm_state;
9855         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9856         uint32_t i, j;
9857         struct drm_crtc *crtc;
9858         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9859         unsigned long flags;
9860         bool wait_for_vblank = true;
9861         struct drm_connector *connector;
9862         struct drm_connector_state *old_con_state, *new_con_state;
9863         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9864         int crtc_disable_count = 0;
9865         bool mode_set_reset_required = false;
9866         int r;
9867
9868         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9869
9870         r = drm_atomic_helper_wait_for_fences(dev, state, false);
9871         if (unlikely(r))
9872                 DRM_ERROR("Waiting for fences timed out!\n");
9873
9874         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9875
9876         dm_state = dm_atomic_get_new_state(state);
9877         if (dm_state && dm_state->context) {
9878                 dc_state = dm_state->context;
9879         } else {
9880                 /* No state changes, retain current state. */
9881                 dc_state_temp = dc_create_state(dm->dc);
9882                 ASSERT(dc_state_temp);
9883                 dc_state = dc_state_temp;
9884                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9885         }
9886
9887         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9888                                        new_crtc_state, i) {
9889                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9890
9891                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9892
9893                 if (old_crtc_state->active &&
9894                     (!new_crtc_state->active ||
9895                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9896                         manage_dm_interrupts(adev, acrtc, false);
9897                         dc_stream_release(dm_old_crtc_state->stream);
9898                 }
9899         }
9900
9901         drm_atomic_helper_calc_timestamping_constants(state);
9902
9903         /* update changed items */
9904         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9905                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9906
9907                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9908                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9909
9910                 drm_dbg_state(state->dev,
9911                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9912                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9913                         "connectors_changed:%d\n",
9914                         acrtc->crtc_id,
9915                         new_crtc_state->enable,
9916                         new_crtc_state->active,
9917                         new_crtc_state->planes_changed,
9918                         new_crtc_state->mode_changed,
9919                         new_crtc_state->active_changed,
9920                         new_crtc_state->connectors_changed);
9921
9922                 /* Disable cursor if disabling crtc */
9923                 if (old_crtc_state->active && !new_crtc_state->active) {
9924                         struct dc_cursor_position position;
9925
9926                         memset(&position, 0, sizeof(position));
9927                         mutex_lock(&dm->dc_lock);
9928                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9929                         mutex_unlock(&dm->dc_lock);
9930                 }
9931
9932                 /* Copy all transient state flags into dc state */
9933                 if (dm_new_crtc_state->stream) {
9934                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9935                                                             dm_new_crtc_state->stream);
9936                 }
9937
9938                 /* Handle the headless hotplug case, updating new_state and
9939                  * aconnector as needed.
9940                  */
9941
9942                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9943
9944                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9945
9946                         if (!dm_new_crtc_state->stream) {
9947                                 /*
9948                                  * This could happen because of issues with
9949                                  * userspace notification delivery: userspace
9950                                  * tries to set a mode on a display which is
9951                                  * in fact disconnected, so dc_sink is NULL on
9952                                  * the aconnector. We expect a mode reset to
9953                                  * come soon.
9954                                  *
9955                                  * This can also happen when an unplug occurs
9956                                  * while the resume sequence is still running.
9957                                  *
9958                                  * In either case, we want to pretend we still
9959                                  * have a sink to keep the pipe running so that
9960                                  * hw state stays consistent with the sw state.
9961                                  */
9962                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9963                                                 __func__, acrtc->base.base.id);
9964                                 continue;
9965                         }
9966
9967                         if (dm_old_crtc_state->stream)
9968                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9969
9970                         pm_runtime_get_noresume(dev->dev);
9971
9972                         acrtc->enabled = true;
9973                         acrtc->hw_mode = new_crtc_state->mode;
9974                         crtc->hwmode = new_crtc_state->mode;
9975                         mode_set_reset_required = true;
9976                 } else if (modereset_required(new_crtc_state)) {
9977                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9978                         /* i.e. reset mode */
9979                         if (dm_old_crtc_state->stream)
9980                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9981
9982                         mode_set_reset_required = true;
9983                 }
9984         } /* for_each_crtc_in_state() */
9985
9986         if (dc_state) {
9987                 /* If there is a mode set or reset, disable eDP PSR. */
9988                 if (mode_set_reset_required) {
9989                         if (dm->vblank_control_workqueue)
9990                                 flush_workqueue(dm->vblank_control_workqueue);
9991
9992                         amdgpu_dm_psr_disable_all(dm);
9993                 }
9994
9995                 dm_enable_per_frame_crtc_master_sync(dc_state);
9996                 mutex_lock(&dm->dc_lock);
9997                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9998
9999                 /* Allow idle optimization when vblank count is 0 for display off */
10000                 if (dm->active_vblank_irq_count == 0)
10001                         dc_allow_idle_optimizations(dm->dc, true);
10002                 mutex_unlock(&dm->dc_lock);
10003         }
10004
10005         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10006                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10007
10008                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10009
10010                 if (dm_new_crtc_state->stream != NULL) {
10011                         const struct dc_stream_status *status =
10012                                         dc_stream_get_status(dm_new_crtc_state->stream);
10013
10014                         if (!status)
10015                                 status = dc_stream_get_status_from_state(dc_state,
10016                                                                          dm_new_crtc_state->stream);
10017                         if (!status)
10018                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
10019                         else
10020                                 acrtc->otg_inst = status->primary_otg_inst;
10021                 }
10022         }
10023 #ifdef CONFIG_DRM_AMD_DC_HDCP
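              /*
               * Re-evaluate content protection for each connector: reset HDCP
               * when a previously protected stream is going away, and update
               * the display when the requested protection state changed.
               */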
10024         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10025                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10026                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10027                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10028
10029                 new_crtc_state = NULL;
10030
10031                 if (acrtc)
10032                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10033
10034                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10035
10036                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
10037                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
10038                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
10039                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
10040                         dm_new_con_state->update_hdcp = true;
10041                         continue;
10042                 }
10043
10044                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
10045                         hdcp_update_display(
10046                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
10047                                 new_con_state->hdcp_content_type,
10048                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
10049         }
10050 #endif
10051
10052         /* Handle connector state changes */
10053         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10054                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10055                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10056                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10057                 struct dc_surface_update dummy_updates[MAX_SURFACES];
10058                 struct dc_stream_update stream_update;
10059                 struct dc_info_packet hdr_packet;
10060                 struct dc_stream_status *status = NULL;
10061                 bool abm_changed, hdr_changed, scaling_changed;
10062
10063                 memset(&dummy_updates, 0, sizeof(dummy_updates));
10064                 memset(&stream_update, 0, sizeof(stream_update));
10065
10066                 if (acrtc) {
10067                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10068                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
10069                 }
10070
10071                 /* Skip any modesets/resets */
10072                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
10073                         continue;
10074
10075                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10076                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10077
10078                 scaling_changed = is_scaling_state_different(dm_new_con_state,
10079                                                              dm_old_con_state);
10080
10081                 abm_changed = dm_new_crtc_state->abm_level !=
10082                               dm_old_crtc_state->abm_level;
10083
10084                 hdr_changed =
10085                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
10086
10087                 if (!scaling_changed && !abm_changed && !hdr_changed)
10088                         continue;
10089
10090                 stream_update.stream = dm_new_crtc_state->stream;
10091                 if (scaling_changed) {
10092                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
10093                                         dm_new_con_state, dm_new_crtc_state->stream);
10094
10095                         stream_update.src = dm_new_crtc_state->stream->src;
10096                         stream_update.dst = dm_new_crtc_state->stream->dst;
10097                 }
10098
10099                 if (abm_changed) {
10100                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
10101
10102                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
10103                 }
10104
10105                 if (hdr_changed) {
10106                         fill_hdr_info_packet(new_con_state, &hdr_packet);
10107                         stream_update.hdr_static_metadata = &hdr_packet;
10108                 }
10109
10110                 status = dc_stream_get_status(dm_new_crtc_state->stream);
10111
10112                 if (WARN_ON(!status))
10113                         continue;
10114
10115                 WARN_ON(!status->plane_count);
10116
10117                 /*
10118                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
10119                  * Here we create an empty update on each plane.
10120                  * To fix this, DC should permit updating only stream properties.
10121                  */
10122                 for (j = 0; j < status->plane_count; j++)
10123                         dummy_updates[j].surface = status->plane_states[0];
10124
10126                 mutex_lock(&dm->dc_lock);
10127                 dc_commit_updates_for_stream(dm->dc,
10128                                                      dummy_updates,
10129                                                      status->plane_count,
10130                                                      dm_new_crtc_state->stream,
10131                                                      &stream_update,
10132                                                      dc_state);
10133                 mutex_unlock(&dm->dc_lock);
10134         }
10135
10136         /* Count number of newly disabled CRTCs for dropping PM refs later. */
10137         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10138                                       new_crtc_state, i) {
10139                 if (old_crtc_state->active && !new_crtc_state->active)
10140                         crtc_disable_count++;
10141
10142                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10143                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10144
10145                 /* For freesync config update on crtc state and params for irq */
10146                 update_stream_irq_parameters(dm, dm_new_crtc_state);
10147
10148                 /* Handle vrr on->off / off->on transitions */
10149                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10150                                                 dm_new_crtc_state);
10151         }
10152
10153         /*
10154          * Enable interrupts for CRTCs that are newly enabled or went through
10155          * a modeset. This is intentionally deferred until after the front end
10156          * state has been modified so that the OTG is already on and the IRQ
10157          * handlers don't access stale or invalid state.
10158          */
10159         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10160                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10161 #ifdef CONFIG_DEBUG_FS
10162                 bool configure_crc = false;
10163                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
10164 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10165                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10166 #endif
10167                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10168                 cur_crc_src = acrtc->dm_irq_params.crc_src;
10169                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10170 #endif
10171                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10172
10173                 if (new_crtc_state->active &&
10174                     (!old_crtc_state->active ||
10175                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10176                         dc_stream_retain(dm_new_crtc_state->stream);
10177                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10178                         manage_dm_interrupts(adev, acrtc, true);
10179
10180 #ifdef CONFIG_DEBUG_FS
10181                         /**
10182                          * Frontend may have changed so reapply the CRC capture
10183                          * settings for the stream.
10184                          */
10185                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10186
10187                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10188                                 configure_crc = true;
10189 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10190                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
10191                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10192                                         acrtc->dm_irq_params.crc_window.update_win = true;
10193                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10194                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10195                                         crc_rd_wrk->crtc = crtc;
10196                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10197                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10198                                 }
10199 #endif
10200                         }
10201
10202                         if (configure_crc)
10203                                 if (amdgpu_dm_crtc_configure_crc_source(
10204                                         crtc, dm_new_crtc_state, cur_crc_src))
10205                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
10206 #endif
10207                 }
10208         }
10209
10210         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10211                 if (new_crtc_state->async_flip)
10212                         wait_for_vblank = false;
10213
10214         /* update planes when needed per crtc */
10215         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10216                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10217
10218                 if (dm_new_crtc_state->stream)
10219                         amdgpu_dm_commit_planes(state, dc_state, dev,
10220                                                 dm, crtc, wait_for_vblank);
10221         }
10222
10223         /* Update audio instances for each connector. */
10224         amdgpu_dm_commit_audio(dev, state);
10225
10226         /* restore the backlight level */
10227         for (i = 0; i < dm->num_of_edps; i++) {
10228                 if (dm->backlight_dev[i] &&
10229                     (dm->actual_brightness[i] != dm->brightness[i]))
10230                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10231         }
10232
10233         /*
10234          * Send a vblank event for all events not handled in the flip path,
10235          * and mark the events consumed for drm_atomic_helper_commit_hw_done().
10236          */
10237         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10238         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10239
10240                 if (new_crtc_state->event)
10241                         drm_send_event_locked(dev, &new_crtc_state->event->base);
10242
10243                 new_crtc_state->event = NULL;
10244         }
10245         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10246
10247         /* Signal HW programming completion */
10248         drm_atomic_helper_commit_hw_done(state);
10249
10250         if (wait_for_vblank)
10251                 drm_atomic_helper_wait_for_flip_done(dev, state);
10252
10253         drm_atomic_helper_cleanup_planes(dev, state);
10254
10255         /* return the stolen vga memory back to VRAM */
10256         if (!adev->mman.keep_stolen_vga_memory)
10257                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10258         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10259
10260         /*
10261          * Finally, drop a runtime PM reference for each newly disabled CRTC,
10262          * so we can put the GPU into runtime suspend if we're not driving any
10263          * displays anymore.
10264          */
10265         for (i = 0; i < crtc_disable_count; i++)
10266                 pm_runtime_put_autosuspend(dev->dev);
10267         pm_runtime_mark_last_busy(dev->dev);
10268
10269         if (dc_state_temp)
10270                 dc_release_state(dc_state_temp);
10271 }
10272
10273
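      /*
       * Build and commit a minimal atomic state (connector + CRTC + primary
       * plane) with mode_changed forced, so that the previous display
       * configuration gets reprogrammed as a full modeset.
       */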
10274 static int dm_force_atomic_commit(struct drm_connector *connector)
10275 {
10276         int ret = 0;
10277         struct drm_device *ddev = connector->dev;
10278         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10279         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10280         struct drm_plane *plane = disconnected_acrtc->base.primary;
10281         struct drm_connector_state *conn_state;
10282         struct drm_crtc_state *crtc_state;
10283         struct drm_plane_state *plane_state;
10284
10285         if (!state)
10286                 return -ENOMEM;
10287
10288         state->acquire_ctx = ddev->mode_config.acquire_ctx;
10289
10290         /* Construct an atomic state to restore the previous display settings */
10291
10292         /* Attach connectors to drm_atomic_state */
10295         conn_state = drm_atomic_get_connector_state(state, connector);
10296
10297         ret = PTR_ERR_OR_ZERO(conn_state);
10298         if (ret)
10299                 goto out;
10300
10301         /* Attach crtc to drm_atomic_state */
10302         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10303
10304         ret = PTR_ERR_OR_ZERO(crtc_state);
10305         if (ret)
10306                 goto out;
10307
10308         /* force a restore */
10309         crtc_state->mode_changed = true;
10310
10311         /* Attach plane to drm_atomic_state */
10312         plane_state = drm_atomic_get_plane_state(state, plane);
10313
10314         ret = PTR_ERR_OR_ZERO(plane_state);
10315         if (ret)
10316                 goto out;
10317
10318         /* Call commit internally with the state we just constructed */
10319         ret = drm_atomic_commit(state);
10320
10321 out:
10322         drm_atomic_state_put(state);
10323         if (ret)
10324                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10325
10326         return ret;
10327 }
10328
10329 /*
10330  * This function handles all cases when a set mode does not come upon hotplug.
10331  * This includes when a display is unplugged then plugged back into the
10332  * same port and when running without usermode desktop manager support.
10333  */
10334 void dm_restore_drm_connector_state(struct drm_device *dev,
10335                                     struct drm_connector *connector)
10336 {
10337         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10338         struct amdgpu_crtc *disconnected_acrtc;
10339         struct dm_crtc_state *acrtc_state;
10340
10341         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10342                 return;
10343
10344         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10345         if (!disconnected_acrtc)
10346                 return;
10347
10348         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10349         if (!acrtc_state->stream)
10350                 return;
10351
10352         /*
10353          * If the previous sink is not released and different from the current,
10354          * we deduce we are in a state where we cannot rely on a usermode call
10355          * to turn on the display, so we do it here.
10356          */
10357         if (acrtc_state->stream->sink != aconnector->dc_sink)
10358                 dm_force_atomic_commit(&aconnector->base);
10359 }
10360
10361 /*
10362  * Grabs all modesetting locks to serialize against any blocking commits, and
10363  * waits for completion of all non-blocking commits.
10364  */
10365 static int do_aquire_global_lock(struct drm_device *dev,
10366                                  struct drm_atomic_state *state)
10367 {
10368         struct drm_crtc *crtc;
10369         struct drm_crtc_commit *commit;
10370         long ret;
10371
10372         /*
10373          * Adding all modeset locks to acquire_ctx ensures that
10374          * when the framework releases it, the extra locks we
10375          * take here get released too.
10376          */
10377         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10378         if (ret)
10379                 return ret;
10380
10381         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10382                 spin_lock(&crtc->commit_lock);
10383                 commit = list_first_entry_or_null(&crtc->commit_list,
10384                                 struct drm_crtc_commit, commit_entry);
10385                 if (commit)
10386                         drm_crtc_commit_get(commit);
10387                 spin_unlock(&crtc->commit_lock);
10388
10389                 if (!commit)
10390                         continue;
10391
10392                 /*
10393                  * Make sure all pending HW programming has completed and
10394                  * all page flips are done.
10395                  */
10396                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10397
10398                 if (ret > 0)
10399                         ret = wait_for_completion_interruptible_timeout(
10400                                         &commit->flip_done, 10*HZ);
10401
10402                 if (ret == 0)
10403                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10404                                   crtc->base.id, crtc->name);
10405
10406                 drm_crtc_commit_put(commit);
10407         }
10408
10409         return ret < 0 ? ret : 0;
10410 }
10411
10412 static void get_freesync_config_for_crtc(
10413         struct dm_crtc_state *new_crtc_state,
10414         struct dm_connector_state *new_con_state)
10415 {
10416         struct mod_freesync_config config = {0};
10417         struct amdgpu_dm_connector *aconnector =
10418                         to_amdgpu_dm_connector(new_con_state->base.connector);
10419         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10420         int vrefresh = drm_mode_vrefresh(mode);
10421         bool fs_vid_mode = false;
10422
10423         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10424                                         vrefresh >= aconnector->min_vfreq &&
10425                                         vrefresh <= aconnector->max_vfreq;
10426
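        /*
         * DC expects the FreeSync range in micro-Hz, hence the "* 1000000"
         * below: e.g. a 48-144 Hz capable panel becomes a
         * 48000000-144000000 uHz range.
         */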
10427         if (new_crtc_state->vrr_supported) {
10428                 new_crtc_state->stream->ignore_msa_timing_param = true;
10429                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10430
10431                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10432                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10433                 config.vsif_supported = true;
10434                 config.btr = true;
10435
10436                 if (fs_vid_mode) {
10437                         config.state = VRR_STATE_ACTIVE_FIXED;
10438                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10439                         goto out;
10440                 } else if (new_crtc_state->base.vrr_enabled) {
10441                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10442                 } else {
10443                         config.state = VRR_STATE_INACTIVE;
10444                 }
10445         }
10446 out:
10447         new_crtc_state->freesync_config = config;
10448 }
10449
10450 static void reset_freesync_config_for_crtc(
10451         struct dm_crtc_state *new_crtc_state)
10452 {
10453         new_crtc_state->vrr_supported = false;
10454
10455         memset(&new_crtc_state->vrr_infopacket, 0,
10456                sizeof(new_crtc_state->vrr_infopacket));
10457 }
10458
10459 static bool
10460 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10461                                  struct drm_crtc_state *new_crtc_state)
10462 {
10463         const struct drm_display_mode *old_mode, *new_mode;
10464
10465         if (!old_crtc_state || !new_crtc_state)
10466                 return false;
10467
10468         old_mode = &old_crtc_state->mode;
10469         new_mode = &new_crtc_state->mode;
10470
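        /*
         * Note the intentional != checks on the vertical fields: the timing
         * counts as "unchanged for freesync" when only vtotal and the vsync
         * position move while the vsync width stays the same, i.e. when two
         * modes differ only in their vertical front porch.
         */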
10471         if (old_mode->clock       == new_mode->clock &&
10472             old_mode->hdisplay    == new_mode->hdisplay &&
10473             old_mode->vdisplay    == new_mode->vdisplay &&
10474             old_mode->htotal      == new_mode->htotal &&
10475             old_mode->vtotal      != new_mode->vtotal &&
10476             old_mode->hsync_start == new_mode->hsync_start &&
10477             old_mode->vsync_start != new_mode->vsync_start &&
10478             old_mode->hsync_end   == new_mode->hsync_end &&
10479             old_mode->vsync_end   != new_mode->vsync_end &&
10480             old_mode->hskew       == new_mode->hskew &&
10481             old_mode->vscan       == new_mode->vscan &&
10482             (old_mode->vsync_end - old_mode->vsync_start) ==
10483             (new_mode->vsync_end - new_mode->vsync_start))
10484                 return true;
10485
10486         return false;
10487 }
10488
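/*
 * The fixed refresh rate is computed in micro-Hz from the mode:
 *
 *     refresh_uhz = (clock_khz * 1000) * 1000000 / (htotal * vtotal)
 *
 * e.g. a 1920x1080@60 mode with clock = 148500, htotal = 2200 and
 * vtotal = 1125 yields 148500000 * 1000000 / 2475000 = 60000000 uHz,
 * i.e. 60 Hz.
 */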
10489 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10490         uint64_t num, den, res;
10491         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10492
10493         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10494
10495         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10496         den = (unsigned long long)new_crtc_state->mode.htotal *
10497               (unsigned long long)new_crtc_state->mode.vtotal;
10498
10499         res = div_u64(num, den);
10500         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10501 }
10502
10503 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10504                          struct drm_atomic_state *state,
10505                          struct drm_crtc *crtc,
10506                          struct drm_crtc_state *old_crtc_state,
10507                          struct drm_crtc_state *new_crtc_state,
10508                          bool enable,
10509                          bool *lock_and_validation_needed)
10510 {
10511         struct dm_atomic_state *dm_state = NULL;
10512         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10513         struct dc_stream_state *new_stream;
10514         int ret = 0;
10515
10516         /*
10517          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10518          * update changed items
10519          */
10520         struct amdgpu_crtc *acrtc = NULL;
10521         struct amdgpu_dm_connector *aconnector = NULL;
10522         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10523         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10524
10525         new_stream = NULL;
10526
10527         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10528         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10529         acrtc = to_amdgpu_crtc(crtc);
10530         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10531
10532         /* TODO This hack should go away */
10533         if (aconnector && enable) {
10534                 /* Make sure fake sink is created in plug-in scenario */
10535                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10536                                                             &aconnector->base);
10537                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10538                                                             &aconnector->base);
10539
                /* drm_atomic_get_new_connector_state() returns NULL, never an ERR_PTR */
10540                 if (WARN_ON(!drm_new_conn_state)) {
10541                         ret = -EINVAL;
10542                         goto fail;
10543                 }
10544
10545                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10546                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10547
10548                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10549                         goto skip_modeset;
10550
10551                 new_stream = create_validate_stream_for_sink(aconnector,
10552                                                              &new_crtc_state->mode,
10553                                                              dm_new_conn_state,
10554                                                              dm_old_crtc_state->stream);
10555
10556                 /*
10557                  * We can have no stream on ACTION_SET if a display
10558                  * was disconnected during S3; in this case it is not an
10559                  * error, the OS will be updated after detection and
10560                  * will do the right thing on the next atomic commit.
10561                  */
10562
10563                 if (!new_stream) {
10564                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10565                                         __func__, acrtc->base.base.id);
10566                         ret = -ENOMEM;
10567                         goto fail;
10568                 }
10569
10570                 /*
10571                  * TODO: Check VSDB bits to decide whether this should
10572                  * be enabled or not.
10573                  */
10574                 new_stream->triggered_crtc_reset.enabled =
10575                         dm->force_timing_sync;
10576
10577                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10578
10579                 ret = fill_hdr_info_packet(drm_new_conn_state,
10580                                            &new_stream->hdr_static_metadata);
10581                 if (ret)
10582                         goto fail;
10583
10584                 /*
10585                  * If we already removed the old stream from the context
10586                  * (and set the new stream to NULL) then we can't reuse
10587                  * the old stream even if the stream and scaling are unchanged.
10588                  * We'll hit the BUG_ON and black screen.
10589                  *
10590                  * TODO: Refactor this function to allow this check to work
10591                  * in all conditions.
10592                  */
10593                 if (dm_new_crtc_state->stream &&
10594                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10595                         goto skip_modeset;
10596
10597                 if (dm_new_crtc_state->stream &&
10598                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10599                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10600                         new_crtc_state->mode_changed = false;
10601                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10602                                          new_crtc_state->mode_changed);
10603                 }
10604         }
10605
10606         /* mode_changed flag may get updated above, need to check again */
10607         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10608                 goto skip_modeset;
10609
10610         drm_dbg_state(state->dev,
10611                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10612                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10613                 "connectors_changed:%d\n",
10614                 acrtc->crtc_id,
10615                 new_crtc_state->enable,
10616                 new_crtc_state->active,
10617                 new_crtc_state->planes_changed,
10618                 new_crtc_state->mode_changed,
10619                 new_crtc_state->active_changed,
10620                 new_crtc_state->connectors_changed);
10621
10622         /* Remove stream for any changed/disabled CRTC */
10623         if (!enable) {
10625                 if (!dm_old_crtc_state->stream)
10626                         goto skip_modeset;
10627
10628                 if (dm_new_crtc_state->stream &&
10629                     is_timing_unchanged_for_freesync(new_crtc_state,
10630                                                      old_crtc_state)) {
10631                         new_crtc_state->mode_changed = false;
10632                         DRM_DEBUG_DRIVER(
10633                                 "Mode change not required for front porch change, "
10634                                 "setting mode_changed to %d",
10635                                 new_crtc_state->mode_changed);
10636
10637                         set_freesync_fixed_config(dm_new_crtc_state);
10638
10639                         goto skip_modeset;
10640                 } else if (aconnector &&
10641                            is_freesync_video_mode(&new_crtc_state->mode,
10642                                                   aconnector)) {
10643                         struct drm_display_mode *high_mode;
10644
10645                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10646                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10647                                 set_freesync_fixed_config(dm_new_crtc_state);
10648                         }
10649                 }
10650
10651                 ret = dm_atomic_get_state(state, &dm_state);
10652                 if (ret)
10653                         goto fail;
10654
10655                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10656                                 crtc->base.id);
10657
10658                 /* i.e. reset mode */
10659                 if (dc_remove_stream_from_ctx(
10660                                 dm->dc,
10661                                 dm_state->context,
10662                                 dm_old_crtc_state->stream) != DC_OK) {
10663                         ret = -EINVAL;
10664                         goto fail;
10665                 }
10666
10667                 dc_stream_release(dm_old_crtc_state->stream);
10668                 dm_new_crtc_state->stream = NULL;
10669
10670                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10671
10672                 *lock_and_validation_needed = true;
10674         } else { /* Add stream for any updated/enabled CRTC */
10675                 /*
10676                  * Quick fix to prevent a NULL pointer dereference on new_stream
10677                  * when added MST connectors are not found in the existing
10678                  * crtc_state in chained mode. TODO: dig out the root cause.
10679                  */
10680                 if (!aconnector)
10681                         goto skip_modeset;
10682
10683                 if (modereset_required(new_crtc_state))
10684                         goto skip_modeset;
10685
10686                 if (modeset_required(new_crtc_state, new_stream,
10687                                      dm_old_crtc_state->stream)) {
10689                         WARN_ON(dm_new_crtc_state->stream);
10690
10691                         ret = dm_atomic_get_state(state, &dm_state);
10692                         if (ret)
10693                                 goto fail;
10694
10695                         dm_new_crtc_state->stream = new_stream;
10696
10697                         dc_stream_retain(new_stream);
10698
10699                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10700                                          crtc->base.id);
10701
10702                         if (dc_add_stream_to_ctx(
10703                                         dm->dc,
10704                                         dm_state->context,
10705                                         dm_new_crtc_state->stream) != DC_OK) {
10706                                 ret = -EINVAL;
10707                                 goto fail;
10708                         }
10709
10710                         *lock_and_validation_needed = true;
10711                 }
10712         }
10713
10714 skip_modeset:
10715         /* Release extra reference */
10716         if (new_stream)
10717                 dc_stream_release(new_stream);
10718
10719         /*
10720          * We want to do dc stream updates that do not require a
10721          * full modeset below.
10722          */
10723         if (!(enable && aconnector && new_crtc_state->active))
10724                 return 0;
10725         /*
10726          * Given above conditions, the dc state cannot be NULL because:
10727          * 1. We're in the process of enabling CRTCs (the stream has just
10728          *    been added to the dc context, or is already in the context),
10729          * 2. Has a valid connector attached, and
10730          * 3. Is currently active and enabled.
10731          * => The dc stream state currently exists.
10732          */
10733         BUG_ON(dm_new_crtc_state->stream == NULL);
10734
10735         /* Scaling or underscan settings */
10736         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10737                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10738                 update_stream_scaling_settings(
10739                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10740
10741         /* ABM settings */
10742         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10743
10744         /*
10745          * Color management settings. We also update color properties
10746          * when a modeset is needed, to ensure it gets reprogrammed.
10747          */
10748         if (dm_new_crtc_state->base.color_mgmt_changed ||
10749             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10750                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10751                 if (ret)
10752                         goto fail;
10753         }
10754
10755         /* Update Freesync settings. */
10756         get_freesync_config_for_crtc(dm_new_crtc_state,
10757                                      dm_new_conn_state);
10758
10759         return ret;
10760
10761 fail:
10762         if (new_stream)
10763                 dc_stream_release(new_stream);
10764         return ret;
10765 }
10766
10767 static bool should_reset_plane(struct drm_atomic_state *state,
10768                                struct drm_plane *plane,
10769                                struct drm_plane_state *old_plane_state,
10770                                struct drm_plane_state *new_plane_state)
10771 {
10772         struct drm_plane *other;
10773         struct drm_plane_state *old_other_state, *new_other_state;
10774         struct drm_crtc_state *new_crtc_state;
10775         int i;
10776
10777         /*
10778          * TODO: Remove this hack once the checks below are sufficient
10779          * to determine when we need to reset all the planes on
10780          * the stream.
10781          */
10782         if (state->allow_modeset)
10783                 return true;
10784
10785         /* Exit early if we know that we're adding or removing the plane. */
10786         if (old_plane_state->crtc != new_plane_state->crtc)
10787                 return true;
10788
10789         /* old crtc == new_crtc == NULL, plane not in context. */
10790         if (!new_plane_state->crtc)
10791                 return false;
10792
10793         new_crtc_state =
10794                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10795
10796         if (!new_crtc_state)
10797                 return true;
10798
10799         /* CRTC Degamma changes currently require us to recreate planes. */
10800         if (new_crtc_state->color_mgmt_changed)
10801                 return true;
10802
10803         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10804                 return true;
10805
10806         /*
10807          * If there are any new primary or overlay planes being added or
10808          * removed then the z-order can potentially change. To ensure
10809          * correct z-order and pipe acquisition the current DC architecture
10810          * requires us to remove and recreate all existing planes.
10811          *
10812          * TODO: Come up with a more elegant solution for this.
10813          */
10814         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10815                 struct amdgpu_framebuffer *old_afb, *new_afb;
10816                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10817                         continue;
10818
10819                 if (old_other_state->crtc != new_plane_state->crtc &&
10820                     new_other_state->crtc != new_plane_state->crtc)
10821                         continue;
10822
10823                 if (old_other_state->crtc != new_other_state->crtc)
10824                         return true;
10825
10826                 /* Src/dst size and scaling updates. */
10827                 if (old_other_state->src_w != new_other_state->src_w ||
10828                     old_other_state->src_h != new_other_state->src_h ||
10829                     old_other_state->crtc_w != new_other_state->crtc_w ||
10830                     old_other_state->crtc_h != new_other_state->crtc_h)
10831                         return true;
10832
10833                 /* Rotation / mirroring updates. */
10834                 if (old_other_state->rotation != new_other_state->rotation)
10835                         return true;
10836
10837                 /* Blending updates. */
10838                 if (old_other_state->pixel_blend_mode !=
10839                     new_other_state->pixel_blend_mode)
10840                         return true;
10841
10842                 /* Alpha updates. */
10843                 if (old_other_state->alpha != new_other_state->alpha)
10844                         return true;
10845
10846                 /* Colorspace changes. */
10847                 if (old_other_state->color_range != new_other_state->color_range ||
10848                     old_other_state->color_encoding != new_other_state->color_encoding)
10849                         return true;
10850
10851                 /* Framebuffer checks fall at the end. */
10852                 if (!old_other_state->fb || !new_other_state->fb)
10853                         continue;
10854
10855                 /* Pixel format changes can require bandwidth updates. */
10856                 if (old_other_state->fb->format != new_other_state->fb->format)
10857                         return true;
10858
10859                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10860                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10861
10862                 /* Tiling and DCC changes also require bandwidth updates. */
10863                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10864                     old_afb->base.modifier != new_afb->base.modifier)
10865                         return true;
10866         }
10867
10868         return false;
10869 }
10870
10871 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10872                               struct drm_plane_state *new_plane_state,
10873                               struct drm_framebuffer *fb)
10874 {
10875         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10876         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10877         unsigned int pitch;
10878         bool linear;
10879
10880         if (fb->width > new_acrtc->max_cursor_width ||
10881             fb->height > new_acrtc->max_cursor_height) {
10882                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10883                                  new_plane_state->fb->width,
10884                                  new_plane_state->fb->height);
10885                 return -EINVAL;
10886         }
10887         if (new_plane_state->src_w != fb->width << 16 ||
10888             new_plane_state->src_h != fb->height << 16) {
10889                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10890                 return -EINVAL;
10891         }
10892
10893         /* Pitch in pixels */
10894         pitch = fb->pitches[0] / fb->format->cpp[0];
10895
10896         if (fb->width != pitch) {
10897                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10898                                  fb->width, pitch);
10899                 return -EINVAL;
10900         }
10901
10902         switch (pitch) {
10903         case 64:
10904         case 128:
10905         case 256:
10906                 /* FB pitch is supported by cursor plane */
10907                 break;
10908         default:
10909                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10910                 return -EINVAL;
10911         }
10912
10913         /*
10914          * Core DRM takes care of checking FB modifiers, so we only need to
         * check tiling flags when the FB doesn't have a modifier.
         */
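        /*
         * Pre-AI (GFX8 and older) parts encode tiling in the ARRAY_MODE and
         * MICRO_TILE_MODE fields, while AI (GFX9) and newer use SWIZZLE_MODE,
         * where 0 means linear.
         */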
10915         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10916                 if (adev->family < AMDGPU_FAMILY_AI) {
10917                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10918                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10919                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10920                 } else {
10921                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10922                 }
10923                 if (!linear) {
10924                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
10925                         return -EINVAL;
10926                 }
10927         }
10928
10929         return 0;
10930 }
10931
10932 static int dm_update_plane_state(struct dc *dc,
10933                                  struct drm_atomic_state *state,
10934                                  struct drm_plane *plane,
10935                                  struct drm_plane_state *old_plane_state,
10936                                  struct drm_plane_state *new_plane_state,
10937                                  bool enable,
10938                                  bool *lock_and_validation_needed)
10939 {
10941         struct dm_atomic_state *dm_state = NULL;
10942         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10943         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10944         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10945         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10946         struct amdgpu_crtc *new_acrtc;
10947         bool needs_reset;
10948         int ret = 0;
10949
10951         new_plane_crtc = new_plane_state->crtc;
10952         old_plane_crtc = old_plane_state->crtc;
10953         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10954         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10955
10956         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10957                 if (!enable || !new_plane_crtc ||
10958                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10959                         return 0;
10960
10961                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10962
10963                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10964                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10965                         return -EINVAL;
10966                 }
10967
10968                 if (new_plane_state->fb) {
10969                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10970                                                  new_plane_state->fb);
10971                         if (ret)
10972                                 return ret;
10973                 }
10974
10975                 return 0;
10976         }
10977
10978         needs_reset = should_reset_plane(state, plane, old_plane_state,
10979                                          new_plane_state);
10980
10981         /* Remove any changed/removed planes */
10982         if (!enable) {
10983                 if (!needs_reset)
10984                         return 0;
10985
10986                 if (!old_plane_crtc)
10987                         return 0;
10988
10989                 old_crtc_state = drm_atomic_get_old_crtc_state(
10990                                 state, old_plane_crtc);
10991                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10992
10993                 if (!dm_old_crtc_state->stream)
10994                         return 0;
10995
10996                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10997                                 plane->base.id, old_plane_crtc->base.id);
10998
10999                 ret = dm_atomic_get_state(state, &dm_state);
11000                 if (ret)
11001                         return ret;
11002
11003                 if (!dc_remove_plane_from_context(
11004                                 dc,
11005                                 dm_old_crtc_state->stream,
11006                                 dm_old_plane_state->dc_state,
11007                                 dm_state->context)) {
11009                         return -EINVAL;
11010                 }
11011
11013                 dc_plane_state_release(dm_old_plane_state->dc_state);
11014                 dm_new_plane_state->dc_state = NULL;
11015
11016                 *lock_and_validation_needed = true;
11017
11018         } else { /* Add new planes */
11019                 struct dc_plane_state *dc_new_plane_state;
11020
11021                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
11022                         return 0;
11023
11024                 if (!new_plane_crtc)
11025                         return 0;
11026
11027                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
11028                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11029
11030                 if (!dm_new_crtc_state->stream)
11031                         return 0;
11032
11033                 if (!needs_reset)
11034                         return 0;
11035
11036                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
11037                 if (ret)
11038                         return ret;
11039
11040                 WARN_ON(dm_new_plane_state->dc_state);
11041
11042                 dc_new_plane_state = dc_create_plane_state(dc);
11043                 if (!dc_new_plane_state)
11044                         return -ENOMEM;
11045
11046                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
11047                                  plane->base.id, new_plane_crtc->base.id);
11048
11049                 ret = fill_dc_plane_attributes(
11050                         drm_to_adev(new_plane_crtc->dev),
11051                         dc_new_plane_state,
11052                         new_plane_state,
11053                         new_crtc_state);
11054                 if (ret) {
11055                         dc_plane_state_release(dc_new_plane_state);
11056                         return ret;
11057                 }
11058
11059                 ret = dm_atomic_get_state(state, &dm_state);
11060                 if (ret) {
11061                         dc_plane_state_release(dc_new_plane_state);
11062                         return ret;
11063                 }
11064
11065                 /*
11066                  * Any atomic check errors that occur after this will
11067                  * not need a release. The plane state will be attached
11068                  * to the stream, and therefore part of the atomic
11069                  * state. It'll be released when the atomic state is
11070                  * cleaned.
11071                  */
11072                 if (!dc_add_plane_to_context(
11073                                 dc,
11074                                 dm_new_crtc_state->stream,
11075                                 dc_new_plane_state,
11076                                 dm_state->context)) {
11078                         dc_plane_state_release(dc_new_plane_state);
11079                         return -EINVAL;
11080                 }
11081
11082                 dm_new_plane_state->dc_state = dc_new_plane_state;
11083
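                /* An enabled overlay plane means multi-plane overlay (MPO) is requested */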
11084                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
11085
11086                 /* Tell DC to do a full surface update every time there
11087                  * is a plane change. Inefficient, but works for now.
11088                  */
11089                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
11090
11091                 *lock_and_validation_needed = true;
11092         }
11093
11095         return ret;
11096 }
11097
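/*
 * drm_plane_state src_w/src_h are 16.16 fixed point, so shifting right by 16
 * yields whole pixels; a 90/270 degree rotation swaps width and height.
 */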
11098 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
11099                                        int *src_w, int *src_h)
11100 {
11101         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
11102         case DRM_MODE_ROTATE_90:
11103         case DRM_MODE_ROTATE_270:
11104                 *src_w = plane_state->src_h >> 16;
11105                 *src_h = plane_state->src_w >> 16;
11106                 break;
11107         case DRM_MODE_ROTATE_0:
11108         case DRM_MODE_ROTATE_180:
11109         default:
11110                 *src_w = plane_state->src_w >> 16;
11111                 *src_h = plane_state->src_h >> 16;
11112                 break;
11113         }
11114 }
11115
11116 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
11117                                 struct drm_crtc *crtc,
11118                                 struct drm_crtc_state *new_crtc_state)
11119 {
11120         struct drm_plane *cursor = crtc->cursor, *underlying;
11121         struct drm_plane_state *new_cursor_state, *new_underlying_state;
11122         int i;
11123         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
11124         int cursor_src_w, cursor_src_h;
11125         int underlying_src_w, underlying_src_h;
11126
11127         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
11128          * cursor per pipe but it's going to inherit the scaling and
11129          * positioning from the underlying pipe. Check that the cursor plane's
11130          * scaling matches the underlying planes'.
         */
11131
11132         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11133         if (!new_cursor_state || !new_cursor_state->fb)
11134                 return 0;
11136
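        /*
         * Scales below are ratios multiplied by 1000: e.g. a 64x64 cursor FB
         * shown in a 64x64 CRTC rectangle gives 64 * 1000 / 64 = 1000 (1:1).
         */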
11137         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11138         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11139         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
11140
11141         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11142                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
11143                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11144                         continue;
11145
11146                 /* Ignore disabled planes */
11147                 if (!new_underlying_state->fb)
11148                         continue;
11149
11150                 dm_get_oriented_plane_size(new_underlying_state,
11151                                            &underlying_src_w, &underlying_src_h);
11152                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11153                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
11154
11155                 if (cursor_scale_w != underlying_scale_w ||
11156                     cursor_scale_h != underlying_scale_h) {
11157                         drm_dbg_atomic(crtc->dev,
11158                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11159                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11160                         return -EINVAL;
11161                 }
11162
11163                 /* If this plane covers the whole CRTC, no need to check planes underneath */
11164                 if (new_underlying_state->crtc_x <= 0 &&
11165                     new_underlying_state->crtc_y <= 0 &&
11166                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11167                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11168                         break;
11169         }
11170
11171         return 0;
11172 }
11173
11174 #if defined(CONFIG_DRM_AMD_DC_DCN)
11175 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11176 {
11177         struct drm_connector *connector;
11178         struct drm_connector_state *conn_state, *old_conn_state;
11179         struct amdgpu_dm_connector *aconnector = NULL;
11180         int i;
11181         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11182                 if (!conn_state->crtc)
11183                         conn_state = old_conn_state;
11184
11185                 if (conn_state->crtc != crtc)
11186                         continue;
11187
11188                 aconnector = to_amdgpu_dm_connector(connector);
11189                 if (!aconnector->port || !aconnector->mst_port)
11190                         aconnector = NULL;
11191                 else
11192                         break;
11193         }
11194
11195         if (!aconnector)
11196                 return 0;
11197
11198         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11199 }
11200 #endif
11201
11202 /**
11203  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
11204  * @dev: The DRM device
11205  * @state: The atomic state to commit
11206  *
11207  * Validate that the given atomic state is programmable by DC into hardware.
11208  * This involves constructing a &struct dc_state reflecting the new hardware
11209  * state we wish to commit, then querying DC to see if it is programmable. It's
11210  * important not to modify the existing DC state. Otherwise, atomic_check
11211  * may unexpectedly commit hardware changes.
11212  *
11213  * When validating the DC state, it's important that the right locks are
11214  * acquired. For the full update case, which removes/adds/updates streams on
11215  * one CRTC while flipping on another CRTC, acquiring the global lock guarantees
11216  * that any such full update commit will wait for completion of any outstanding
11217  * flip using DRM's synchronization events.
11218  *
11219  * Note that DM adds the affected connectors for all CRTCs in state, even when
11220  * that might not seem necessary. This is because DC stream creation requires the
11221  * DC sink, which is tied to the DRM connector state. Cleaning this up should
11222  * be possible but non-trivial - a possible TODO item.
11223  *
11224  * Return: 0 on success, negative error code on failure.
11225  */
11226 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11227                                   struct drm_atomic_state *state)
11228 {
11229         struct amdgpu_device *adev = drm_to_adev(dev);
11230         struct dm_atomic_state *dm_state = NULL;
11231         struct dc *dc = adev->dm.dc;
11232         struct drm_connector *connector;
11233         struct drm_connector_state *old_con_state, *new_con_state;
11234         struct drm_crtc *crtc;
11235         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11236         struct drm_plane *plane;
11237         struct drm_plane_state *old_plane_state, *new_plane_state;
11238         enum dc_status status;
11239         int ret, i;
11240         bool lock_and_validation_needed = false;
11241         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11242 #if defined(CONFIG_DRM_AMD_DC_DCN)
11243         struct dsc_mst_fairness_vars vars[MAX_PIPES];
11244         struct drm_dp_mst_topology_state *mst_state;
11245         struct drm_dp_mst_topology_mgr *mgr;
11246 #endif
11247
11248         trace_amdgpu_dm_atomic_check_begin(state);
11249
11250         ret = drm_atomic_helper_check_modeset(dev, state);
11251         if (ret) {
11252                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11253                 goto fail;
11254         }
11255
11256         /* Check connector changes */
11257         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11258                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11259                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11260
11261                 /* Skip connectors that are disabled or part of modeset already. */
11262                 if (!old_con_state->crtc && !new_con_state->crtc)
11263                         continue;
11264
11265                 if (!new_con_state->crtc)
11266                         continue;
11267
11268                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11269                 if (IS_ERR(new_crtc_state)) {
11270                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11271                         ret = PTR_ERR(new_crtc_state);
11272                         goto fail;
11273                 }
11274
11275                 if (dm_old_con_state->abm_level !=
11276                     dm_new_con_state->abm_level)
11277                         new_crtc_state->connectors_changed = true;
11278         }
11279
11280 #if defined(CONFIG_DRM_AMD_DC_DCN)
11281         if (dc_resource_is_dsc_encoding_supported(dc)) {
11282                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11283                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11284                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
11285                                 if (ret) {
11286                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11287                                         goto fail;
11288                                 }
11289                         }
11290                 }
11291                 if (!pre_validate_dsc(state, &dm_state, vars)) {
11292                         ret = -EINVAL;
11293                         goto fail;
11294                 }
11295         }
11296 #endif
11297         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11298                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11299
11300                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11301                     !new_crtc_state->color_mgmt_changed &&
11302                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11303                         dm_old_crtc_state->dsc_force_changed == false)
11304                         continue;
11305
11306                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11307                 if (ret) {
11308                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11309                         goto fail;
11310                 }
11311
11312                 if (!new_crtc_state->enable)
11313                         continue;
11314
11315                 ret = drm_atomic_add_affected_connectors(state, crtc);
11316                 if (ret) {
11317                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11318                         goto fail;
11319                 }
11320
11321                 ret = drm_atomic_add_affected_planes(state, crtc);
11322                 if (ret) {
11323                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11324                         goto fail;
11325                 }
11326
11327                 if (dm_old_crtc_state->dsc_force_changed)
11328                         new_crtc_state->mode_changed = true;
11329         }
11330
11331         /*
11332          * Add all primary and overlay planes on the CRTC to the state
11333          * whenever a plane is enabled to maintain correct z-ordering
11334          * and to enable fast surface updates.
11335          */
11336         drm_for_each_crtc(crtc, dev) {
11337                 bool modified = false;
11338
11339                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11340                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11341                                 continue;
11342
11343                         if (new_plane_state->crtc == crtc ||
11344                             old_plane_state->crtc == crtc) {
11345                                 modified = true;
11346                                 break;
11347                         }
11348                 }
11349
11350                 if (!modified)
11351                         continue;
11352
11353                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11354                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11355                                 continue;
11356
11357                         new_plane_state =
11358                                 drm_atomic_get_plane_state(state, plane);
11359
11360                         if (IS_ERR(new_plane_state)) {
11361                                 ret = PTR_ERR(new_plane_state);
11362                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11363                                 goto fail;
11364                         }
11365                 }
11366         }
11367
11368         /* Remove existing planes if they are modified */
11369         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11370                 ret = dm_update_plane_state(dc, state, plane,
11371                                             old_plane_state,
11372                                             new_plane_state,
11373                                             false,
11374                                             &lock_and_validation_needed);
11375                 if (ret) {
11376                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11377                         goto fail;
11378                 }
11379         }
11380
11381         /* Disable all crtcs which require disable */
11382         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11383                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11384                                            old_crtc_state,
11385                                            new_crtc_state,
11386                                            false,
11387                                            &lock_and_validation_needed);
11388                 if (ret) {
11389                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11390                         goto fail;
11391                 }
11392         }
11393
11394         /* Enable all crtcs which require enable */
11395         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11396                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11397                                            old_crtc_state,
11398                                            new_crtc_state,
11399                                            true,
11400                                            &lock_and_validation_needed);
11401                 if (ret) {
11402                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11403                         goto fail;
11404                 }
11405         }
11406
11407         /* Add new/modified planes */
11408         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11409                 ret = dm_update_plane_state(dc, state, plane,
11410                                             old_plane_state,
11411                                             new_plane_state,
11412                                             true,
11413                                             &lock_and_validation_needed);
11414                 if (ret) {
11415                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11416                         goto fail;
11417                 }
11418         }
11419
11420         /* Run this here since we want to validate the streams we created */
11421         ret = drm_atomic_helper_check_planes(dev, state);
11422         if (ret) {
11423                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11424                 goto fail;
11425         }
11426
11427         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11428                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11429                 if (dm_new_crtc_state->mpo_requested)
11430                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11431         }
11432
11433         /* Check cursor planes scaling */
11434         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11435                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11436                 if (ret) {
11437                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11438                         goto fail;
11439                 }
11440         }
11441
11442         if (state->legacy_cursor_update) {
11443                 /*
11444                  * This is a fast cursor update coming from the plane update
11445                  * helper, check if it can be done asynchronously for better
11446                  * performance.
11447                  */
11448                 state->async_update =
11449                         !drm_atomic_helper_async_check(dev, state);
11450
11451                 /*
11452                  * Skip the remaining global validation if this is an async
11453                  * update. Cursor updates can be done without affecting
11454                  * state or bandwidth calcs and this avoids the performance
11455                  * penalty of locking the private state object and
11456                  * allocating a new dc_state.
11457                  */
11458                 if (state->async_update)
11459                         return 0;
11460         }
11461
11462         /* Check scaling and underscan changes */
11463         /* TODO: Removed scaling changes validation due to inability to commit
11464          * a new stream into the context w/o causing a full reset. Need to
11465          * decide how to handle.
11466          */
11467         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11468                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11469                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11470                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11471
11472                 /* Skip any modesets/resets */
11473                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11474                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11475                         continue;
11476
11477                 /* Skip anything that is not a scaling or underscan change */
11478                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11479                         continue;
11480
11481                 lock_and_validation_needed = true;
11482         }
11483
11484 #if defined(CONFIG_DRM_AMD_DC_DCN)
11485         /* set the slot info for each mst_state based on the link encoding format */
11486         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11487                 struct amdgpu_dm_connector *aconnector;
11488                 struct drm_connector *connector;
11489                 struct drm_connector_list_iter iter;
11490                 u8 link_coding_cap;
11491
11492                 if (!mgr->mst_state )
11493                         continue;
11494
11495                 drm_connector_list_iter_begin(dev, &iter);
11496                 drm_for_each_connector_iter(connector, &iter) {
11497                         int id = connector->index;
11498
11499                         if (id == mst_state->mgr->conn_base_id) {
11500                                 aconnector = to_amdgpu_dm_connector(connector);
11501                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11502                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11503
11504                                 break;
11505                         }
11506                 }
11507                 drm_connector_list_iter_end(&iter);
11509         }
11510 #endif
11511         /*
11512          * Streams and planes are reset when there are changes that affect
11513          * bandwidth. Anything that affects bandwidth needs to go through
11514          * DC global validation to ensure that the configuration can be applied
11515          * to hardware.
11516          *
11517          * We have to currently stall out here in atomic_check for outstanding
11518          * commits to finish in this case because our IRQ handlers reference
11519          * DRM state directly - we can end up disabling interrupts too early
11520          * if we don't.
11521          *
11522          * TODO: Remove this stall and drop DM state private objects.
11523          */
11524         if (lock_and_validation_needed) {
11525                 ret = dm_atomic_get_state(state, &dm_state);
11526                 if (ret) {
11527                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11528                         goto fail;
11529                 }
11530
11531                 ret = do_aquire_global_lock(dev, state);
11532                 if (ret) {
11533                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11534                         goto fail;
11535                 }
11536
11537 #if defined(CONFIG_DRM_AMD_DC_DCN)
11538                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11539                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11540                         ret = -EINVAL;
11541                         goto fail;
11542                 }
11543
11544                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11545                 if (ret) {
11546                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11547                         goto fail;
11548                 }
11549 #endif
11550
11551                 /*
11552                  * Perform validation of MST topology in the state:
11553                  * We need to perform MST atomic check before calling
11554          * dc_validate_global_state(), or we risk getting stuck in
11555          * an infinite loop and eventually hanging.
11556                  */
11557                 ret = drm_dp_mst_atomic_check(state);
11558                 if (ret) {
11559                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11560                         goto fail;
11561                 }
11562                 status = dc_validate_global_state(dc, dm_state->context, true);
11563                 if (status != DC_OK) {
11564                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11565                                        dc_status_to_str(status), status);
11566                         ret = -EINVAL;
11567                         goto fail;
11568                 }
11569         } else {
11570                 /*
11571                  * The commit is a fast update. Fast updates shouldn't change
11572                  * the DC context, affect global validation, and can have their
11573                  * commit work done in parallel with other commits not touching
11574                  * the same resource. If we have a new DC context as part of
11575                  * the DM atomic state from validation we need to free it and
11576                  * retain the existing one instead.
11577                  *
11578                  * Furthermore, since the DM atomic state only contains the DC
11579                  * context and can safely be annulled, we can free the state
11580                  * and clear the associated private object now to free
11581                  * some memory and avoid a possible use-after-free later.
11582                  */
11583
11584                 for (i = 0; i < state->num_private_objs; i++) {
11585                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11586
11587                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11588                                 int j = state->num_private_objs - 1;
11589
11590                                 dm_atomic_destroy_state(obj,
11591                                                 state->private_objs[i].state);
11592
11593                                 /*
11594                                  * If i is not at the end of the array, move the
11595                                  * last element into i's slot before truncating.
11596                                  */
11597                                 if (i != j)
11598                                         state->private_objs[i] =
11599                                                 state->private_objs[j];
11600
11601                                 state->private_objs[j].ptr = NULL;
11602                                 state->private_objs[j].state = NULL;
11603                                 state->private_objs[j].old_state = NULL;
11604                                 state->private_objs[j].new_state = NULL;
11605
11606                                 state->num_private_objs = j;
11607                                 break;
11608                         }
11609                 }
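                /*
                 * The loop above removes the DM private object using the
                 * classic "swap with last, then shrink" idiom for unordered
                 * arrays. A minimal sketch of the idiom with a hypothetical
                 * element type (illustration only, not driver code):
                 *
                 *   static void remove_unordered(struct item *arr, int *num, int victim)
                 *   {
                 *           arr[victim] = arr[*num - 1];   // overwrite victim slot
                 *           memset(&arr[*num - 1], 0, sizeof(arr[0]));  // clear stale tail
                 *           (*num)--;                      // truncate the array
                 *   }
                 *
                 * O(1), but element order is not preserved, which is fine
                 * here since private objects are matched by pointer.
                 */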
11610         }
11611
11612         /* Store the overall update type for use later in atomic check. */
11613         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11614                 struct dm_crtc_state *dm_new_crtc_state =
11615                         to_dm_crtc_state(new_crtc_state);
11616
11617                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11618                                                          UPDATE_TYPE_FULL :
11619                                                          UPDATE_TYPE_FAST;
11620         }
11621
11622         /* Must be success: all failure paths jump to the fail label. */
11623         WARN_ON(ret);
11624
11625         trace_amdgpu_dm_atomic_check_finish(state, ret);
11626
11627         return ret;
11628
11629 fail:
11630         if (ret == -EDEADLK)
11631                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11632         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11633                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11634         else
11635                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11636
11637         trace_amdgpu_dm_atomic_check_finish(state, ret);
11638
11639         return ret;
11640 }
11641
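/*
 * Check whether the sink can ignore the MSA timing parameters: read one byte
 * from the DP_DOWN_STREAM_PORT_COUNT DPCD register and test the
 * DP_MSA_TIMING_PAR_IGNORED bit. A sink that can render without MSA timing
 * is the prerequisite used below for DP/eDP freesync.
 */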
11642 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11643                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11644 {
11645         uint8_t dpcd_data;
11646         bool capable = false;
11647
11648         if (amdgpu_dm_connector->dc_link &&
11649                 dm_helpers_dp_read_dpcd(
11650                                 NULL,
11651                                 amdgpu_dm_connector->dc_link,
11652                                 DP_DOWN_STREAM_PORT_COUNT,
11653                                 &dpcd_data,
11654                                 sizeof(dpcd_data))) {
11655                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
11656         }
11657
11658         return capable;
11659 }
11660
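/*
 * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA extension
 * block to the DMUB firmware parser via a DMUB_CMD__EDID_CEA command and
 * interpret the reply: intermediate chunks are acked, and the final chunk
 * returns the AMD VSDB parse result, which is copied into *vsdb.
 */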
11661 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11662                 unsigned int offset,
11663                 unsigned int total_length,
11664                 uint8_t *data,
11665                 unsigned int length,
11666                 struct amdgpu_hdmi_vsdb_info *vsdb)
11667 {
11668         bool res;
11669         union dmub_rb_cmd cmd;
11670         struct dmub_cmd_send_edid_cea *input;
11671         struct dmub_cmd_edid_cea_output *output;
11672
11673         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11674                 return false;
11675
11676         memset(&cmd, 0, sizeof(cmd));
11677
11678         input = &cmd.edid_cea.data.input;
11679
11680         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11681         cmd.edid_cea.header.sub_type = 0;
11682         cmd.edid_cea.header.payload_bytes =
11683                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11684         input->offset = offset;
11685         input->length = length;
11686         input->cea_total_length = total_length;
11687         memcpy(input->payload, data, length);
11688
11689         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11690         if (!res) {
11691                 DRM_ERROR("EDID CEA parser failed\n");
11692                 return false;
11693         }
11694
11695         output = &cmd.edid_cea.data.output;
11696
11697         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11698                 if (!output->ack.success) {
11699                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11700                                         output->ack.offset);
11701                 }
11702         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11703                 if (!output->amd_vsdb.vsdb_found)
11704                         return false;
11705
11706                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11707                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11708                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11709                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11710         } else {
11711                 DRM_WARN("Unknown EDID CEA parser results\n");
11712                 return false;
11713         }
11714
11715         return true;
11716 }
11717
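/*
 * Stream a CEA extension block to the DMCU firmware parser 8 bytes at a
 * time, checking for an ack after each intermediate chunk. After the final
 * chunk, fetch the AMD VSDB result (version and min/max frame rate).
 * Returns true only if an AMD VSDB was found.
 */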
11718 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11719                 uint8_t *edid_ext, int len,
11720                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11721 {
11722         int i;
11723
11724         /* send extension block to DMCU for parsing */
11725         for (i = 0; i < len; i += 8) {
11726                 bool res;
11727                 int offset;
11728
11729                 /* send 8 bytes at a time */
11730                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11731                         return false;
11732
11733                 if (i + 8 == len) {
11734                         /* entire EDID block sent; expect the parse result */
11735                         int version, min_rate, max_rate;
11736
11737                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11738                         if (res) {
11739                                 /* amd vsdb found */
11740                                 vsdb_info->freesync_supported = 1;
11741                                 vsdb_info->amd_vsdb_version = version;
11742                                 vsdb_info->min_refresh_rate_hz = min_rate;
11743                                 vsdb_info->max_refresh_rate_hz = max_rate;
11744                                 return true;
11745                         }
11746                         /* not amd vsdb */
11747                         return false;
11748                 }
11749
11750                 /* check for ack */
11751                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11752                 if (!res)
11753                         return false;
11754         }
11755
11756         return false;
11757 }
11758
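/*
 * DMUB counterpart of parse_edid_cea_dmcu(): dm_edid_parser_send_cea()
 * fills *vsdb_info when the final chunk is processed, so freesync support
 * is known once the whole block has been sent.
 */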
11759 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11760                 uint8_t *edid_ext, int len,
11761                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11762 {
11763         int i;
11764
11765         /* send extension block to DMUB for parsing */
11766         for (i = 0; i < len; i += 8) {
11767                 /* send 8 bytes at a time */
11768                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11769                         return false;
11770         }
11771
11772         return vsdb_info->freesync_supported;
11773 }
11774
11775 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11776                 uint8_t *edid_ext, int len,
11777                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11778 {
11779         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11780
11781         if (adev->dm.dmub_srv)
11782                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11783         else
11784                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11785 }
11786
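/*
 * Locate the CEA extension block in the EDID (open-coding the relevant part
 * of drm_find_cea_extension()) and hand it to the firmware parser to look
 * for the AMD VSDB. Returns the extension block index on success, -ENODEV
 * otherwise.
 */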
11787 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11788                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11789 {
11790         uint8_t *edid_ext = NULL;
11791         int i;
11792         bool valid_vsdb_found = false;
11793
11794         /*----- drm_find_cea_extension() -----*/
11795         /* No EDID or EDID extensions */
11796         if (edid == NULL || edid->extensions == 0)
11797                 return -ENODEV;
11798
11799         /* Find CEA extension */
11800         for (i = 0; i < edid->extensions; i++) {
11801                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11802                 if (edid_ext[0] == CEA_EXT)
11803                         break;
11804         }
11805
11806         if (i == edid->extensions)
11807                 return -ENODEV;
11808
11809         /*----- cea_db_offsets() -----*/
11810         if (edid_ext[0] != CEA_EXT)
11811                 return -ENODEV;
11812
11813         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11814
11815         return valid_vsdb_found ? i : -ENODEV;
11816 }
11817
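/*
 * Update the connector's freesync capability from a freshly parsed EDID.
 * For DP/eDP sinks that can ignore MSA timing, scan the EDID detailed
 * timing descriptors for a monitor range; for HDMI sinks, query the
 * firmware-parsed AMD VSDB. A refresh range wider than 10 Hz marks the
 * connector freesync capable, which is also mirrored into the DRM
 * vrr_capable property.
 */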
11818 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11819                                         struct edid *edid)
11820 {
11821         int i = 0;
11822         struct detailed_timing *timing;
11823         struct detailed_non_pixel *data;
11824         struct detailed_data_monitor_range *range;
11825         struct amdgpu_dm_connector *amdgpu_dm_connector =
11826                         to_amdgpu_dm_connector(connector);
11827         struct dm_connector_state *dm_con_state = NULL;
11828         struct dc_sink *sink;
11829
11830         struct drm_device *dev = connector->dev;
11831         struct amdgpu_device *adev = drm_to_adev(dev);
11832         bool freesync_capable = false;
11833         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11834
11835         if (!connector->state) {
11836                 DRM_ERROR("%s - Connector has no state\n", __func__);
11837                 goto update;
11838         }
11839
11840         sink = amdgpu_dm_connector->dc_sink ?
11841                 amdgpu_dm_connector->dc_sink :
11842                 amdgpu_dm_connector->dc_em_sink;
11843
11844         if (!edid || !sink) {
11845                 dm_con_state = to_dm_connector_state(connector->state);
11846
11847                 amdgpu_dm_connector->min_vfreq = 0;
11848                 amdgpu_dm_connector->max_vfreq = 0;
11849                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11850                 connector->display_info.monitor_range.min_vfreq = 0;
11851                 connector->display_info.monitor_range.max_vfreq = 0;
11852                 freesync_capable = false;
11853
11854                 goto update;
11855         }
11856
11857         dm_con_state = to_dm_connector_state(connector->state);
11858
11859         if (!adev->dm.freesync_module)
11860                 goto update;
11861
11863         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
11864             sink->sink_signal == SIGNAL_TYPE_EDP) {
11865                 bool edid_check_required = false;
11866
11867                 if (edid) {
11868                         edid_check_required = is_dp_capable_without_timing_msa(
11869                                                 adev->dm.dc,
11870                                                 amdgpu_dm_connector);
11871                 }
11872
11873                 if (edid_check_required && (edid->version > 1 ||
11874                    (edid->version == 1 && edid->revision > 1))) {
11875                         for (i = 0; i < 4; i++) {
11877                                 timing  = &edid->detailed_timings[i];
11878                                 data    = &timing->data.other_data;
11879                                 range   = &data->data.range;
11880                                 /*
11881                                  * Check if the monitor has continuous frequency mode
11882                                  */
11883                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11884                                         continue;
11885                                 /*
11886                                  * Check for range limits only: if flags == 1,
11887                                  * no additional timing information is provided.
11888                                  * Default GTF, GTF secondary curve and CVT are
11889                                  * not supported.
11890                                  */
11891                                 if (range->flags != 1)
11892                                         continue;
11893
11894                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11895                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11896                                 amdgpu_dm_connector->pixel_clock_mhz =
11897                                         range->pixel_clock_mhz * 10;
11898
11899                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11900                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11901
11902                                 break;
11903                         }
11904
11905                         if (amdgpu_dm_connector->max_vfreq -
11906                             amdgpu_dm_connector->min_vfreq > 10)
11907                                 freesync_capable = true;
11910                 }
11911         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11912                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11913                 if (i >= 0 && vsdb_info.freesync_supported) {
11914                         timing  = &edid->detailed_timings[i];
11915                         data    = &timing->data.other_data;
11916
11917                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11918                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11919                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11920                                 freesync_capable = true;
11921
11922                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11923                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11924                 }
11925         }
11926
11927 update:
11928         if (dm_con_state)
11929                 dm_con_state->freesync_capable = freesync_capable;
11930
11931         if (connector->vrr_capable_property)
11932                 drm_connector_set_vrr_capable_property(connector,
11933                                                        freesync_capable);
11934 }
11935
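/*
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and retrigger per-frame CRTC master synchronization, all under the
 * DC lock.
 */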
11936 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11937 {
11938         struct amdgpu_device *adev = drm_to_adev(dev);
11939         struct dc *dc = adev->dm.dc;
11940         int i;
11941
11942         mutex_lock(&adev->dm.dc_lock);
11943         if (dc->current_state) {
11944                 for (i = 0; i < dc->current_state->stream_count; ++i)
11945                         dc->current_state->streams[i]
11946                                 ->triggered_crtc_reset.enabled =
11947                                 adev->dm.force_timing_sync;
11948
11949                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11950                 dc_trigger_sync(dc, dc->current_state);
11951         }
11952         mutex_unlock(&adev->dm.dc_lock);
11953 }
11954
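/*
 * Register access callbacks used by DC: writes and reads go through the CGS
 * layer and are recorded by the amdgpu_dc_wreg/amdgpu_dc_rreg trace points.
 * With DM_CHECK_ADDR_0 defined, accesses to address 0 are rejected.
 */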
11955 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11956                        uint32_t value, const char *func_name)
11957 {
11958 #ifdef DM_CHECK_ADDR_0
11959         if (address == 0) {
11960                 DC_ERR("invalid register write. address = 0\n");
11961                 return;
11962         }
11963 #endif
11964         cgs_write_register(ctx->cgs_device, address, value);
11965         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11966 }
11967
11968 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11969                           const char *func_name)
11970 {
11971         uint32_t value;
11972 #ifdef DM_CHECK_ADDR_0
11973         if (address == 0) {
11974                 DC_ERR("invalid register read; address = 0\n");
11975                 return 0;
11976         }
11977 #endif
11978
11979         if (ctx->dmub_srv &&
11980             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11981             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11982                 ASSERT(false);
11983                 return 0;
11984         }
11985
11986         value = cgs_read_register(ctx->cgs_device, address);
11987
11988         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11989
11990         return value;
11991 }
11992
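/*
 * Translate the outcome of a DMUB async-to-sync access into a return value
 * and an operation result: for AUX commands, success yields the reply length
 * plus the raw AUX result; for SET_CONFIG, success yields 0 plus the
 * sideband status. All failures map to -1 with a matching error code in
 * *operation_result.
 */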
11993 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11994                                                 struct dc_context *ctx,
11995                                                 uint8_t status_type,
11996                                                 uint32_t *operation_result)
11997 {
11998         struct amdgpu_device *adev = ctx->driver_context;
11999         int return_status = -1;
12000         struct dmub_notification *p_notify = adev->dm.dmub_notify;
12001
12002         if (is_cmd_aux) {
12003                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
12004                         return_status = p_notify->aux_reply.length;
12005                         *operation_result = p_notify->result;
12006                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
12007                         *operation_result = AUX_RET_ERROR_TIMEOUT;
12008                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
12009                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
12010                 } else {
12011                         *operation_result = AUX_RET_ERROR_UNKNOWN;
12012                 }
12013         } else {
12014                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
12015                         return_status = 0;
12016                         *operation_result = p_notify->sc_status;
12017                 } else {
12018                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
12019                 }
12020         }
12021
12022         return return_status;
12023 }
12024
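/*
 * Issue an AUX transfer or SET_CONFIG request to DMUB asynchronously, then
 * block (up to 10 seconds) on the dmub_aux_transfer_done completion fired by
 * the notification handler. Successful AUX reads copy the reply data back
 * into the caller's payload; a SET_CONFIG request that DMUB completes
 * synchronously returns without waiting.
 */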
12025 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
12026         unsigned int link_index, void *cmd_payload, void *operation_result)
12027 {
12028         struct amdgpu_device *adev = ctx->driver_context;
12029         int ret = 0;
12030
12031         if (is_cmd_aux) {
12032                 dc_process_dmub_aux_transfer_async(ctx->dc,
12033                         link_index, (struct aux_payload *)cmd_payload);
12034         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
12035                                         (struct set_config_cmd_payload *)cmd_payload,
12036                                         adev->dm.dmub_notify)) {
12037                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12038                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12039                                         (uint32_t *)operation_result);
12040         }
12041
12042         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
12043         if (ret == 0) {
12044                 DRM_ERROR("wait_for_completion_timeout() timed out!\n");
12045                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12046                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
12047                                 (uint32_t *)operation_result);
12048         }
12049
12050         if (is_cmd_aux) {
12051                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
12052                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
12053
12054                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
12055                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
12056                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
12057                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
12058                                        adev->dm.dmub_notify->aux_reply.length);
12059                         }
12060                 }
12061         }
12062
12063         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12064                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12065                         (uint32_t *)operation_result);
12066 }
12067
12068 /*
12069  * Check whether seamless boot is supported.
12070  *
12071  * So far we only support seamless boot on CHIP_VANGOGH.
12072  * If everything goes well, we may consider expanding
12073  * seamless boot to other ASICs.
12074  */
12075 bool check_seamless_boot_capability(struct amdgpu_device *adev)
12076 {
12077         switch (adev->asic_type) {
12078         case CHIP_VANGOGH:
12079                 if (!adev->mman.keep_stolen_vga_memory)
12080                         return true;
12081                 break;
12082         default:
12083                 break;
12084         }
12085
12086         return false;
12087 }