/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS
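/*
 * Defining CREATE_TRACE_POINTS in exactly one compilation unit before the
 * trace header is pulled in (via amdgpu_dm_trace.h below) instantiates the
 * tracepoint bodies here; every other file includes the header without it.
 */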
#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
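/*
 * The signed DMCUB binary carries a PSP header and footer around its
 * executable section; dm_dmub_sw_init() and dm_dmub_hw_init() below subtract
 * these when computing the instruction-constant region size and offset.
 */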
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);

/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}
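/*
 * dm_crtc_get_scanoutpos() reports the current scanout position in the legacy
 * register layout the base driver expects: vertical position in the low
 * 16 bits and horizontal position in the high 16 bits, with the vblank
 * start/end pair packed the same way.
 */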
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
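/*
 * A DC timing adjustment is needed when a commit enters fixed-rate VRR
 * (VRR_STATE_ACTIVE_FIXED) or when the VRR active state toggles between the
 * old and new CRTC state.
 */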
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}
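/*
 * Handler for the VUPDATE interrupt, which fires after the end of the
 * front-porch. In VRR mode this is the point where vblank timestamping is
 * valid, so core vblank handling and BTR processing are deferred to here;
 * dm_crtc_high_irq() covers the non-VRR path at start of front-porch.
 */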
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	bool vrr_active;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id, vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		dm_crtc_handle_vblank(acrtc);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}
/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls a helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}
/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}
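/*
 * Upper bound on the number of trace buffer entries drained per outbox
 * interrupt in dm_dmub_outbox1_low_irq(); anything beyond this is left for
 * the next interrupt and flagged with a debug warning.
 */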
#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt event.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				if (dmub_hpd_wrk->dmub_notify)
					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
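/*
 * The two hooks above are intentionally no-ops: DM does not toggle clock or
 * power gating from these IP-block entry points.
 */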
/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
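/*
 * Glue for the drm_audio_component interface: the HDA driver binds to DM
 * through the component framework and calls get_eld() to fetch the ELD of
 * the connector driving a given audio pin (port).
 */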
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
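/*
 * On resume, re-arm DMUB: if the firmware is still initialized, only wait for
 * its auto-load to complete; otherwise redo the full hardware init.
 */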
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
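/*
 * Deferred handler for HPD RX IRQs that are too heavy for interrupt context
 * (link-loss recovery, automated test requests). Runs from the per-link
 * offload workqueue created in hpd_rx_irq_create_workqueue().
 */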
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
		 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
		 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}
struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};
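/*
 * Returns true when the device's PCI IDs and revision match an entry in
 * amdgpu_stutter_quirk_list above; matching boards get memory stutter mode
 * disabled during amdgpu_dm_init().
 */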
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}
static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{}
};
static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);

	if (dmi_id) {
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
	}
}
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
	spin_lock_init(&adev->dm.vblank_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			/* enable S/G on PCO and RV2 */
			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
			    (adev->apu_flags & AMD_APU_IS_PICASSO))
				init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			init_data.flags.gpu_vm_support = true;
			break;
		default:
			break;
		}
		break;
	}

	if (init_data.flags.gpu_vm_support)
		adev->mode_info.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	init_data.flags.enable_mipi_converter_optimization = true;

	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];

	INIT_LIST_HEAD(&adev->dm.da_list);

	retrieve_dmi_info(&adev->dm);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		adev->dm.dc->debug.disable_dsc = true;
		adev->dm.dc->debug.disable_dsc_edp = true;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
		adev->dm.dc->debug.force_subvp_mclk_switch = true;

	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
						   dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
	}
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
	 * It is expected that DMUB will resend any pending notifications at this point, for
	 * example HPD from DPIA.
	 */
	if (dc_is_dmub_outbox_supported(adev->dm.dc))
		dc_enable_dmub_outbox(adev->dm.dc);

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;

error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			return 0;
		default:
			break;
		}
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
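/*
 * Register accessors handed to the DMUB service at creation time; they route
 * DMUB register I/O through DC's dm_read_reg()/dm_write_reg() helpers.
 */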
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case IP_VERSION(3, 0, 0):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		} else {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		}
		break;
	case IP_VERSION(3, 0, 1):
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case IP_VERSION(3, 0, 2):
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case IP_VERSION(3, 0, 3):
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;
	case IP_VERSION(3, 1, 4):
		dmub_asic = DMUB_ASIC_DCN314;
		fw_name_dmub = FIRMWARE_DCN_314_DMUB;
		break;
	case IP_VERSION(3, 1, 5):
		dmub_asic = DMUB_ASIC_DCN315;
		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
		break;
	case IP_VERSION(3, 1, 6):
		dmub_asic = DMUB_ASIC_DCN316;
		fw_name_dmub = FIRMWARE_DCN316_DMUB;
		break;
	case IP_VERSION(3, 2, 0):
		dmub_asic = DMUB_ASIC_DCN32;
		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
		break;
	case IP_VERSION(3, 2, 1):
		dmub_asic = DMUB_ASIC_DCN321;
		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}
	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}
2004 /* Calculate the size of all the regions for the DMUB service. */
2005 memset(&region_params, 0, sizeof(region_params));
2007 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2008 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
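/*
 * A sketch of the firmware blob layout implied by the offsets in this
 * block (inferred from this code, not from a DMUB spec):
 *
 *   data + ucode_array_offset_bytes
 *   |<---------- inst_const_bytes ---------->|
 *   [ PSP header | inst const | PSP footer ] [ bss/data ]
 *
 * hence inst_const_size strips the PSP header and footer bytes.
 */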
2009 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2010 region_params.vbios_size = adev->bios_size;
2011 region_params.fw_bss_data = region_params.bss_data_size ?
2012 adev->dm.dmub_fw->data +
2013 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2014 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2015 region_params.fw_inst_const =
2016 adev->dm.dmub_fw->data +
2017 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2020 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2023 if (status != DMUB_STATUS_OK) {
2024 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2029 * Allocate a framebuffer based on the total size of all the regions.
2030 * TODO: Move this into GART.
2032 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2033 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2034 &adev->dm.dmub_bo_gpu_addr,
2035 &adev->dm.dmub_bo_cpu_addr);
2039 /* Rebase the regions on the framebuffer address. */
2040 memset(&fb_params, 0, sizeof(fb_params));
2041 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2042 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2043 fb_params.region_info = &region_info;
2045 adev->dm.dmub_fb_info =
2046 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2047 fb_info = adev->dm.dmub_fb_info;
2051 "Failed to allocate framebuffer info for DMUB service!\n");
2055 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2056 if (status != DMUB_STATUS_OK) {
2057 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2064 static int dm_sw_init(void *handle)
2066 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2069 r = dm_dmub_sw_init(adev);
2073 return load_dmcu_fw(adev);
2076 static int dm_sw_fini(void *handle)
2078 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2080 kfree(adev->dm.dmub_fb_info);
2081 adev->dm.dmub_fb_info = NULL;
2083 if (adev->dm.dmub_srv) {
2084 dmub_srv_destroy(adev->dm.dmub_srv);
2085 adev->dm.dmub_srv = NULL;
2088 release_firmware(adev->dm.dmub_fw);
2089 adev->dm.dmub_fw = NULL;
2091 release_firmware(adev->dm.fw_dmcu);
2092 adev->dm.fw_dmcu = NULL;
2097 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2099 struct amdgpu_dm_connector *aconnector;
2100 struct drm_connector *connector;
2101 struct drm_connector_list_iter iter;
2104 drm_connector_list_iter_begin(dev, &iter);
2105 drm_for_each_connector_iter(connector, &iter) {
2106 aconnector = to_amdgpu_dm_connector(connector);
2107 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2108 aconnector->mst_mgr.aux) {
2109 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2111 aconnector->base.base.id);
2113 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2115 DRM_ERROR("DM_MST: Failed to start MST\n");
2116 aconnector->dc_link->type =
2117 dc_connection_single;
2122 drm_connector_list_iter_end(&iter);
2127 static int dm_late_init(void *handle)
2129 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2131 struct dmcu_iram_parameters params;
2132 unsigned int linear_lut[16];
2134 struct dmcu *dmcu = NULL;
2136 dmcu = adev->dm.dc->res_pool->dmcu;
2138 for (i = 0; i < 16; i++)
2139 linear_lut[i] = 0xFFFF * i / 15;
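/* 0xFFFF * i / 15 yields an even 16-bit ramp: 0x0000, 0x1111, ..., 0xFFFF. */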
2142 params.backlight_ramping_override = false;
2143 params.backlight_ramping_start = 0xCCCC;
2144 params.backlight_ramping_reduction = 0xCCCCCCCC;
2145 params.backlight_lut_array_size = 16;
2146 params.backlight_lut_array = linear_lut;
2148 /* Min backlight level after ABM reduction; don't allow below 1%:
2149 * 0xFFFF x 0.01 = 0x28F
2151 params.min_abm_backlight = 0x28F;
2152 /* In the case where ABM is implemented on DMCUB,
2153 * the dmcu object will be NULL.
2154 * ABM 2.4 and up are implemented on DMCUB.
2157 if (!dmcu_load_iram(dmcu, params))
2159 } else if (adev->dm.dc->ctx->dmub_srv) {
2160 struct dc_link *edp_links[MAX_NUM_EDP];
2163 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2164 for (i = 0; i < edp_num; i++) {
2165 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2170 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2173 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2175 struct amdgpu_dm_connector *aconnector;
2176 struct drm_connector *connector;
2177 struct drm_connector_list_iter iter;
2178 struct drm_dp_mst_topology_mgr *mgr;
2180 bool need_hotplug = false;
2182 drm_connector_list_iter_begin(dev, &iter);
2183 drm_for_each_connector_iter(connector, &iter) {
2184 aconnector = to_amdgpu_dm_connector(connector);
2185 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2186 aconnector->mst_port)
2189 mgr = &aconnector->mst_mgr;
2192 drm_dp_mst_topology_mgr_suspend(mgr);
2194 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2196 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2197 aconnector->dc_link);
2198 need_hotplug = true;
2202 drm_connector_list_iter_end(&iter);
2205 drm_kms_helper_hotplug_event(dev);
2208 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2212 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2213 * on the Windows driver DC implementation.
2214 * For Navi1x, the clock settings of the DCN watermarks are fixed. The settings
2215 * should be passed to SMU during boot up and on resume from S3.
2216 * Boot up: DC calculates the DCN watermark clock settings within dc_create,
2217 * dcn20_resource_construct
2218 * and then calls the pplib functions below to pass the settings to SMU:
2219 * smu_set_watermarks_for_clock_ranges
2220 * smu_set_watermarks_table
2221 * navi10_set_watermarks_table
2222 * smu_write_watermarks_table
2224 * For Renoir, the clock settings of the DCN watermarks are also fixed values.
2225 * DC has implemented a different flow for the Windows driver:
2226 * dc_hardware_init / dc_set_power_state
2231 * smu_set_watermarks_for_clock_ranges
2232 * renoir_set_watermarks_table
2233 * smu_write_watermarks_table
2236 * dc_hardware_init -> amdgpu_dm_init
2237 * dc_set_power_state --> dm_resume
2239 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
2242 switch (adev->ip_versions[DCE_HWIP][0]) {
2243 case IP_VERSION(2, 0, 2):
2244 case IP_VERSION(2, 0, 0):
2250 ret = amdgpu_dpm_write_watermarks_table(adev);
2252 DRM_ERROR("Failed to update WMTABLE!\n");
2260 * dm_hw_init() - Initialize DC device
2261 * @handle: The base driver device containing the amdgpu_dm device.
2263 * Initialize the &struct amdgpu_display_manager device. This involves calling
2264 * the initializers of each DM component, then populating the struct with them.
2266 * Although the function implies hardware initialization, both hardware and
2267 * software are initialized here. Splitting them out to their relevant init
2268 * hooks is a future TODO item.
2270 * Some notable things that are initialized here:
2272 * - Display Core, both software and hardware
2273 * - DC modules that we need (freesync and color management)
2274 * - DRM software states
2275 * - Interrupt sources and handlers
2277 * - Debug FS entries, if enabled
2279 static int dm_hw_init(void *handle)
2281 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2282 /* Create DAL display manager */
2283 amdgpu_dm_init(adev);
2284 amdgpu_dm_hpd_init(adev);
2290 * dm_hw_fini() - Teardown DC device
2291 * @handle: The base driver device containing the amdgpu_dm device.
2293 * Teardown components within &struct amdgpu_display_manager that require
2294 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2295 * were loaded. Also flush IRQ workqueues and disable them.
2297 static int dm_hw_fini(void *handle)
2299 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2301 amdgpu_dm_hpd_fini(adev);
2303 amdgpu_dm_irq_fini(adev);
2304 amdgpu_dm_fini(adev);
2309 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2310 struct dc_state *state, bool enable)
2312 enum dc_irq_source irq_source;
2313 struct amdgpu_crtc *acrtc;
2317 for (i = 0; i < state->stream_count; i++) {
2318 acrtc = get_crtc_by_otg_inst(
2319 adev, state->stream_status[i].primary_otg_inst);
2321 if (acrtc && state->stream_status[i].plane_count != 0) {
2322 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2323 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2324 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2325 acrtc->crtc_id, enable ? "en" : "dis", rc);
2327 DRM_WARN("Failed to %s pflip interrupts\n",
2328 enable ? "enable" : "disable");
2331 rc = dm_enable_vblank(&acrtc->base);
2333 DRM_WARN("Failed to enable vblank interrupts\n");
2335 dm_disable_vblank(&acrtc->base);
2343 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2345 struct dc_state *context = NULL;
2346 enum dc_status res = DC_ERROR_UNEXPECTED;
2348 struct dc_stream_state *del_streams[MAX_PIPES];
2349 int del_streams_count = 0;
2351 memset(del_streams, 0, sizeof(del_streams));
2353 context = dc_create_state(dc);
2354 if (context == NULL)
2355 goto context_alloc_fail;
2357 dc_resource_state_copy_construct_current(dc, context);
2359 /* First remove from context all streams */
2360 for (i = 0; i < context->stream_count; i++) {
2361 struct dc_stream_state *stream = context->streams[i];
2363 del_streams[del_streams_count++] = stream;
2366 /* Remove all planes for removed streams and then remove the streams */
2367 for (i = 0; i < del_streams_count; i++) {
2368 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2369 res = DC_FAIL_DETACH_SURFACES;
2373 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2378 res = dc_commit_state(dc, context);
2381 dc_release_state(context);
2387 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2391 if (dm->hpd_rx_offload_wq) {
2392 for (i = 0; i < dm->dc->caps.max_links; i++)
2393 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2397 static int dm_suspend(void *handle)
2399 struct amdgpu_device *adev = handle;
2400 struct amdgpu_display_manager *dm = &adev->dm;
2403 if (amdgpu_in_reset(adev)) {
2404 mutex_lock(&dm->dc_lock);
2406 dc_allow_idle_optimizations(adev->dm.dc, false);
2408 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2410 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2412 amdgpu_dm_commit_zero_streams(dm->dc);
2414 amdgpu_dm_irq_suspend(adev);
2416 hpd_rx_irq_work_suspend(dm);
2421 WARN_ON(adev->dm.cached_state);
2422 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2424 s3_handle_mst(adev_to_drm(adev), true);
2426 amdgpu_dm_irq_suspend(adev);
2428 hpd_rx_irq_work_suspend(dm);
2430 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2435 struct amdgpu_dm_connector *
2436 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2437 struct drm_crtc *crtc)
2440 struct drm_connector_state *new_con_state;
2441 struct drm_connector *connector;
2442 struct drm_crtc *crtc_from_state;
2444 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2445 crtc_from_state = new_con_state->crtc;
2447 if (crtc_from_state == crtc)
2448 return to_amdgpu_dm_connector(connector);
2454 static void emulated_link_detect(struct dc_link *link)
2456 struct dc_sink_init_data sink_init_data = { 0 };
2457 struct display_sink_capability sink_caps = { 0 };
2458 enum dc_edid_status edid_status;
2459 struct dc_context *dc_ctx = link->ctx;
2460 struct dc_sink *sink = NULL;
2461 struct dc_sink *prev_sink = NULL;
2463 link->type = dc_connection_none;
2464 prev_sink = link->local_sink;
2467 dc_sink_release(prev_sink);
2469 switch (link->connector_signal) {
2470 case SIGNAL_TYPE_HDMI_TYPE_A: {
2471 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2472 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2476 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2477 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2478 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2482 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2483 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2484 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2488 case SIGNAL_TYPE_LVDS: {
2489 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2490 sink_caps.signal = SIGNAL_TYPE_LVDS;
2494 case SIGNAL_TYPE_EDP: {
2495 sink_caps.transaction_type =
2496 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2497 sink_caps.signal = SIGNAL_TYPE_EDP;
2501 case SIGNAL_TYPE_DISPLAY_PORT: {
2502 sink_caps.transaction_type =
2503 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2504 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2509 DC_ERROR("Invalid connector type! signal:%d\n",
2510 link->connector_signal);
2514 sink_init_data.link = link;
2515 sink_init_data.sink_signal = sink_caps.signal;
2517 sink = dc_sink_create(&sink_init_data);
2519 DC_ERROR("Failed to create sink!\n");
2523 /* dc_sink_create returns a new reference */
2524 link->local_sink = sink;
2526 edid_status = dm_helpers_read_local_edid(
2531 if (edid_status != EDID_OK)
2532 DC_ERROR("Failed to read EDID");
2536 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2537 struct amdgpu_display_manager *dm)
2540 struct dc_surface_update surface_updates[MAX_SURFACES];
2541 struct dc_plane_info plane_infos[MAX_SURFACES];
2542 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2543 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2544 struct dc_stream_update stream_update;
2548 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2551 dm_error("Failed to allocate update bundle\n");
2555 for (k = 0; k < dc_state->stream_count; k++) {
2556 bundle->stream_update.stream = dc_state->streams[k];
2558 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2559 bundle->surface_updates[m].surface =
2560 dc_state->stream_status->plane_states[m];
2561 bundle->surface_updates[m].surface->force_full_update =
2564 dc_commit_updates_for_stream(
2565 dm->dc, bundle->surface_updates,
2566 dc_state->stream_status->plane_count,
2567 dc_state->streams[k], &bundle->stream_update, dc_state);
2576 static int dm_resume(void *handle)
2578 struct amdgpu_device *adev = handle;
2579 struct drm_device *ddev = adev_to_drm(adev);
2580 struct amdgpu_display_manager *dm = &adev->dm;
2581 struct amdgpu_dm_connector *aconnector;
2582 struct drm_connector *connector;
2583 struct drm_connector_list_iter iter;
2584 struct drm_crtc *crtc;
2585 struct drm_crtc_state *new_crtc_state;
2586 struct dm_crtc_state *dm_new_crtc_state;
2587 struct drm_plane *plane;
2588 struct drm_plane_state *new_plane_state;
2589 struct dm_plane_state *dm_new_plane_state;
2590 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2591 enum dc_connection_type new_connection_type = dc_connection_none;
2592 struct dc_state *dc_state;
2595 if (amdgpu_in_reset(adev)) {
2596 dc_state = dm->cached_dc_state;
2599 * The dc->current_state is backed up into dm->cached_dc_state
2600 * before we commit 0 streams.
2602 * DC will clear link encoder assignments on the real state
2603 * but the changes won't propagate over to the copy we made
2604 * before the 0 streams commit.
2606 * DC expects that link encoder assignments are *not* valid
2607 * when committing a state, so as a workaround we can copy
2608 * off of the current state.
2610 * We lose the previous assignments, but we had already
2611 * committed 0 streams anyway.
2613 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2615 r = dm_dmub_hw_init(adev);
2617 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2619 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2622 amdgpu_dm_irq_resume_early(adev);
2624 for (i = 0; i < dc_state->stream_count; i++) {
2625 dc_state->streams[i]->mode_changed = true;
2626 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2627 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2632 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2633 amdgpu_dm_outbox_init(adev);
2634 dc_enable_dmub_outbox(adev->dm.dc);
2637 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2639 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2641 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2643 dc_release_state(dm->cached_dc_state);
2644 dm->cached_dc_state = NULL;
2646 amdgpu_dm_irq_resume_late(adev);
2648 mutex_unlock(&dm->dc_lock);
2652 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2653 dc_release_state(dm_state->context);
2654 dm_state->context = dc_create_state(dm->dc);
2655 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2656 dc_resource_state_construct(dm->dc, dm_state->context);
2658 /* Before powering on DC we need to re-initialize DMUB. */
2659 dm_dmub_hw_resume(adev);
2661 /* Re-enable outbox interrupts for DPIA. */
2662 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2663 amdgpu_dm_outbox_init(adev);
2664 dc_enable_dmub_outbox(adev->dm.dc);
2667 /* power on hardware */
2668 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2670 /* program HPD filter */
2674 * early enable HPD Rx IRQ, should be done before set mode as short
2675 * pulse interrupts are used for MST
2677 amdgpu_dm_irq_resume_early(adev);
2679 /* On resume we need to rewrite the MSTM control bits to enable MST */
2680 s3_handle_mst(ddev, false);
2683 drm_connector_list_iter_begin(ddev, &iter);
2684 drm_for_each_connector_iter(connector, &iter) {
2685 aconnector = to_amdgpu_dm_connector(connector);
2688 * This is the case when traversing through already-created
2689 * MST connectors; they should be skipped.
2691 if (aconnector->dc_link &&
2692 aconnector->dc_link->type == dc_connection_mst_branch)
2695 mutex_lock(&aconnector->hpd_lock);
2696 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2697 DRM_ERROR("KMS: Failed to detect connector\n");
2699 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2700 emulated_link_detect(aconnector->dc_link);
2702 mutex_lock(&dm->dc_lock);
2703 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2704 mutex_unlock(&dm->dc_lock);
2707 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2708 aconnector->fake_enable = false;
2710 if (aconnector->dc_sink)
2711 dc_sink_release(aconnector->dc_sink);
2712 aconnector->dc_sink = NULL;
2713 amdgpu_dm_update_connector_after_detect(aconnector);
2714 mutex_unlock(&aconnector->hpd_lock);
2716 drm_connector_list_iter_end(&iter);
2718 /* Force mode set in atomic commit */
2719 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2720 new_crtc_state->active_changed = true;
2723 * atomic_check is expected to create the dc states. We need to release
2724 * them here, since they were duplicated as part of the suspend procedure.
2727 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2728 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2729 if (dm_new_crtc_state->stream) {
2730 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2731 dc_stream_release(dm_new_crtc_state->stream);
2732 dm_new_crtc_state->stream = NULL;
2736 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2737 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2738 if (dm_new_plane_state->dc_state) {
2739 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2740 dc_plane_state_release(dm_new_plane_state->dc_state);
2741 dm_new_plane_state->dc_state = NULL;
2745 drm_atomic_helper_resume(ddev, dm->cached_state);
2747 dm->cached_state = NULL;
2749 amdgpu_dm_irq_resume_late(adev);
2751 amdgpu_dm_smu_write_watermarks_table(adev);
2759 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2760 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2761 * the base driver's device list to be initialized and torn down accordingly.
2763 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2766 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2768 .early_init = dm_early_init,
2769 .late_init = dm_late_init,
2770 .sw_init = dm_sw_init,
2771 .sw_fini = dm_sw_fini,
2772 .early_fini = amdgpu_dm_early_fini,
2773 .hw_init = dm_hw_init,
2774 .hw_fini = dm_hw_fini,
2775 .suspend = dm_suspend,
2776 .resume = dm_resume,
2777 .is_idle = dm_is_idle,
2778 .wait_for_idle = dm_wait_for_idle,
2779 .check_soft_reset = dm_check_soft_reset,
2780 .soft_reset = dm_soft_reset,
2781 .set_clockgating_state = dm_set_clockgating_state,
2782 .set_powergating_state = dm_set_powergating_state,
2785 const struct amdgpu_ip_block_version dm_ip_block =
2787 .type = AMD_IP_BLOCK_TYPE_DCE,
2791 .funcs = &amdgpu_dm_funcs,
2801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2802 .fb_create = amdgpu_display_user_framebuffer_create,
2803 .get_format_info = amd_get_format_info,
2804 .output_poll_changed = drm_fb_helper_output_poll_changed,
2805 .atomic_check = amdgpu_dm_atomic_check,
2806 .atomic_commit = drm_atomic_helper_commit,
2809 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2810 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2813 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2815 u32 max_avg, min_cll, max, min, q, r;
2816 struct amdgpu_dm_backlight_caps *caps;
2817 struct amdgpu_display_manager *dm;
2818 struct drm_connector *conn_base;
2819 struct amdgpu_device *adev;
2820 struct dc_link *link = NULL;
2821 static const u8 pre_computed_values[] = {
2822 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2823 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2826 if (!aconnector || !aconnector->dc_link)
2829 link = aconnector->dc_link;
2830 if (link->connector_signal != SIGNAL_TYPE_EDP)
2833 conn_base = &aconnector->base;
2834 adev = drm_to_adev(conn_base->dev);
2836 for (i = 0; i < dm->num_of_edps; i++) {
2837 if (link == dm->backlight_link[i])
2840 if (i >= dm->num_of_edps)
2842 caps = &dm->backlight_caps[i];
2843 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2844 caps->aux_support = false;
2845 max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2846 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2848 if (caps->ext_caps->bits.oled == 1 /*||
2849 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2850 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2851 caps->aux_support = true;
2853 if (amdgpu_backlight == 0)
2854 caps->aux_support = false;
2855 else if (amdgpu_backlight == 1)
2856 caps->aux_support = true;
2858 /* From the specification (CTA-861-G), for calculating the maximum
2859 * luminance we need to use:
2860 * Luminance = 50*2**(CV/32)
2861 * Where CV is a one-byte value.
2862 * Evaluating this expression would require floating-point precision;
2863 * to avoid that complexity, we take advantage of the fact that CV is
2864 * divided by a constant. From Euclid's division algorithm, we know that
2865 * CV can be written as: CV = 32*q + r. Next, we replace CV in the
2866 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2867 * need to pre-compute the value of r/32. For pre-computing the values
2868 * we just used the following Ruby line:
2869 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2870 * The results of the above expression can be verified against
2871 * pre_computed_values.
2875 max = (1 << q) * pre_computed_values[r];
2877 // min luminance: maxLum * (CV/255)^2 / 100
2878 q = DIV_ROUND_CLOSEST(min_cll, 255);
2879 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2881 caps->aux_max_input_signal = max;
2882 caps->aux_min_input_signal = min;
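/*
 * Worked example with a hypothetical sink reporting max_avg = 100
 * (assuming q = max_avg / 32 and r = max_avg % 32, per the comment
 * above): q = 3, r = 4, so max = (1 << 3) * pre_computed_values[4]
 * = 8 * 55 = 440, close to the exact round(50 * 2**(100/32)) = 436;
 * the small difference comes from rounding r/32 in the table.
 */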
2885 void amdgpu_dm_update_connector_after_detect(
2886 struct amdgpu_dm_connector *aconnector)
2888 struct drm_connector *connector = &aconnector->base;
2889 struct drm_device *dev = connector->dev;
2890 struct dc_sink *sink;
2892 /* MST handled by drm_mst framework */
2893 if (aconnector->mst_mgr.mst_state)
2896 sink = aconnector->dc_link->local_sink;
2898 dc_sink_retain(sink);
2901 * An EDID-managed connector gets its first update only in the mode_valid
2902 * hook, and then the connector sink is set to either a fake or a physical
2903 * sink depending on link status. Skip if already done during boot.
2905 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2906 && aconnector->dc_em_sink) {
2909 * For S3 resume with a headless setup, use the emulated sink (dc_em_sink)
2910 * to fake the stream, because on resume connector->sink is set to NULL.
2912 mutex_lock(&dev->mode_config.mutex);
2915 if (aconnector->dc_sink) {
2916 amdgpu_dm_update_freesync_caps(connector, NULL);
2918 * The retain and release below are used to bump up the
2919 * refcount for the sink because the link doesn't point
2920 * to it anymore after disconnect, so on the next crtc-to-connector
2921 * reshuffle by UMD we would otherwise get an unwanted dc_sink release.
2923 dc_sink_release(aconnector->dc_sink);
2925 aconnector->dc_sink = sink;
2926 dc_sink_retain(aconnector->dc_sink);
2927 amdgpu_dm_update_freesync_caps(connector,
2930 amdgpu_dm_update_freesync_caps(connector, NULL);
2931 if (!aconnector->dc_sink) {
2932 aconnector->dc_sink = aconnector->dc_em_sink;
2933 dc_sink_retain(aconnector->dc_sink);
2937 mutex_unlock(&dev->mode_config.mutex);
2940 dc_sink_release(sink);
2945 * TODO: temporary guard while looking for a proper fix.
2946 * If this sink is an MST sink, we should not do anything.
2948 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2949 dc_sink_release(sink);
2953 if (aconnector->dc_sink == sink) {
2955 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2958 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2959 aconnector->connector_id);
2961 dc_sink_release(sink);
2965 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2966 aconnector->connector_id, aconnector->dc_sink, sink);
2968 mutex_lock(&dev->mode_config.mutex);
2971 * 1. Update status of the drm connector
2972 * 2. Send an event and let userspace tell us what to do
2976 * TODO: check if we still need the S3 mode update workaround.
2977 * If yes, put it here.
2979 if (aconnector->dc_sink) {
2980 amdgpu_dm_update_freesync_caps(connector, NULL);
2981 dc_sink_release(aconnector->dc_sink);
2984 aconnector->dc_sink = sink;
2985 dc_sink_retain(aconnector->dc_sink);
2986 if (sink->dc_edid.length == 0) {
2987 aconnector->edid = NULL;
2988 if (aconnector->dc_link->aux_mode) {
2989 drm_dp_cec_unset_edid(
2990 &aconnector->dm_dp_aux.aux);
2994 (struct edid *)sink->dc_edid.raw_edid;
2996 if (aconnector->dc_link->aux_mode)
2997 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3001 drm_connector_update_edid_property(connector, aconnector->edid);
3002 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3003 update_connector_ext_caps(aconnector);
3005 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3006 amdgpu_dm_update_freesync_caps(connector, NULL);
3007 drm_connector_update_edid_property(connector, NULL);
3008 aconnector->num_modes = 0;
3009 dc_sink_release(aconnector->dc_sink);
3010 aconnector->dc_sink = NULL;
3011 aconnector->edid = NULL;
3012 #ifdef CONFIG_DRM_AMD_DC_HDCP
3013 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3014 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3015 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3019 mutex_unlock(&dev->mode_config.mutex);
3021 update_subconnector_property(aconnector);
3024 dc_sink_release(sink);
3027 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3029 struct drm_connector *connector = &aconnector->base;
3030 struct drm_device *dev = connector->dev;
3031 enum dc_connection_type new_connection_type = dc_connection_none;
3032 struct amdgpu_device *adev = drm_to_adev(dev);
3033 #ifdef CONFIG_DRM_AMD_DC_HDCP
3034 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3038 if (adev->dm.disable_hpd_irq)
3042 * In case of failure or MST there is no need to update the connector status
3043 * or notify the OS, since (in the MST case) MST does this in its own context.
3045 mutex_lock(&aconnector->hpd_lock);
3047 #ifdef CONFIG_DRM_AMD_DC_HDCP
3048 if (adev->dm.hdcp_workqueue) {
3049 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3050 dm_con_state->update_hdcp = true;
3053 if (aconnector->fake_enable)
3054 aconnector->fake_enable = false;
3056 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3057 DRM_ERROR("KMS: Failed to detect connector\n");
3059 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3060 emulated_link_detect(aconnector->dc_link);
3062 drm_modeset_lock_all(dev);
3063 dm_restore_drm_connector_state(dev, connector);
3064 drm_modeset_unlock_all(dev);
3066 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3067 drm_kms_helper_connector_hotplug_event(connector);
3069 mutex_lock(&adev->dm.dc_lock);
3070 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3071 mutex_unlock(&adev->dm.dc_lock);
3073 amdgpu_dm_update_connector_after_detect(aconnector);
3075 drm_modeset_lock_all(dev);
3076 dm_restore_drm_connector_state(dev, connector);
3077 drm_modeset_unlock_all(dev);
3079 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3080 drm_kms_helper_connector_hotplug_event(connector);
3083 mutex_unlock(&aconnector->hpd_lock);
3087 static void handle_hpd_irq(void *param)
3089 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3091 handle_hpd_irq_helper(aconnector);
3095 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3097 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3099 bool new_irq_handled = false;
3101 int dpcd_bytes_to_read;
3103 const int max_process_count = 30;
3104 int process_count = 0;
3106 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3108 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3109 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3110 /* DPCD 0x200 - 0x201 for downstream IRQ */
3111 dpcd_addr = DP_SINK_COUNT;
3113 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3114 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3115 dpcd_addr = DP_SINK_COUNT_ESI;
3118 dret = drm_dp_dpcd_read(
3119 &aconnector->dm_dp_aux.aux,
3122 dpcd_bytes_to_read);
3124 while (dret == dpcd_bytes_to_read &&
3125 process_count < max_process_count) {
3131 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3132 /* handle HPD short pulse irq */
3133 if (aconnector->mst_mgr.mst_state)
3135 &aconnector->mst_mgr,
3139 if (new_irq_handled) {
3140 /* ACK at DPCD to notify downstream */
3141 const int ack_dpcd_bytes_to_write =
3142 dpcd_bytes_to_read - 1;
3144 for (retry = 0; retry < 3; retry++) {
3147 wret = drm_dp_dpcd_write(
3148 &aconnector->dm_dp_aux.aux,
3151 ack_dpcd_bytes_to_write);
3152 if (wret == ack_dpcd_bytes_to_write)
3156 /* check if there is a new irq to be handled */
3157 dret = drm_dp_dpcd_read(
3158 &aconnector->dm_dp_aux.aux,
3161 dpcd_bytes_to_read);
3163 new_irq_handled = false;
3169 if (process_count == max_process_count)
3170 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3173 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3174 union hpd_irq_data hpd_irq_data)
3176 struct hpd_rx_irq_offload_work *offload_work =
3177 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3179 if (!offload_work) {
3180 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3184 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3185 offload_work->data = hpd_irq_data;
3186 offload_work->offload_wq = offload_wq;
3188 queue_work(offload_wq->wq, &offload_work->work);
3189 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3192 static void handle_hpd_rx_irq(void *param)
3194 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3195 struct drm_connector *connector = &aconnector->base;
3196 struct drm_device *dev = connector->dev;
3197 struct dc_link *dc_link = aconnector->dc_link;
3198 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3199 bool result = false;
3200 enum dc_connection_type new_connection_type = dc_connection_none;
3201 struct amdgpu_device *adev = drm_to_adev(dev);
3202 union hpd_irq_data hpd_irq_data;
3203 bool link_loss = false;
3204 bool has_left_work = false;
3205 int idx = aconnector->base.index;
3206 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3208 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3210 if (adev->dm.disable_hpd_irq)
3214 * TODO: Temporarily add a mutex to protect the HPD interrupt from a GPIO
3215 * conflict; after the i2c helper is implemented, this mutex should be retired.
3218 mutex_lock(&aconnector->hpd_lock);
3220 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3221 &link_loss, true, &has_left_work);
3226 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3227 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3231 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3232 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3233 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3234 dm_handle_mst_sideband_msg(aconnector);
3241 spin_lock(&offload_wq->offload_lock);
3242 skip = offload_wq->is_handling_link_loss;
3245 offload_wq->is_handling_link_loss = true;
3247 spin_unlock(&offload_wq->offload_lock);
3250 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3257 if (result && !is_mst_root_connector) {
3258 /* Downstream Port status changed. */
3259 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3260 DRM_ERROR("KMS: Failed to detect connector\n");
3262 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3263 emulated_link_detect(dc_link);
3265 if (aconnector->fake_enable)
3266 aconnector->fake_enable = false;
3268 amdgpu_dm_update_connector_after_detect(aconnector);
3271 drm_modeset_lock_all(dev);
3272 dm_restore_drm_connector_state(dev, connector);
3273 drm_modeset_unlock_all(dev);
3275 drm_kms_helper_connector_hotplug_event(connector);
3279 mutex_lock(&adev->dm.dc_lock);
3280 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3281 mutex_unlock(&adev->dm.dc_lock);
3284 if (aconnector->fake_enable)
3285 aconnector->fake_enable = false;
3287 amdgpu_dm_update_connector_after_detect(aconnector);
3289 drm_modeset_lock_all(dev);
3290 dm_restore_drm_connector_state(dev, connector);
3291 drm_modeset_unlock_all(dev);
3293 drm_kms_helper_connector_hotplug_event(connector);
3297 #ifdef CONFIG_DRM_AMD_DC_HDCP
3298 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3299 if (adev->dm.hdcp_workqueue)
3300 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3304 if (dc_link->type != dc_connection_mst_branch)
3305 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3307 mutex_unlock(&aconnector->hpd_lock);
3310 static void register_hpd_handlers(struct amdgpu_device *adev)
3312 struct drm_device *dev = adev_to_drm(adev);
3313 struct drm_connector *connector;
3314 struct amdgpu_dm_connector *aconnector;
3315 const struct dc_link *dc_link;
3316 struct dc_interrupt_params int_params = {0};
3318 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3319 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3321 list_for_each_entry(connector,
3322 &dev->mode_config.connector_list, head) {
3324 aconnector = to_amdgpu_dm_connector(connector);
3325 dc_link = aconnector->dc_link;
3327 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3328 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3329 int_params.irq_source = dc_link->irq_source_hpd;
3331 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3333 (void *) aconnector);
3336 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3338 /* Also register for DP short pulse (hpd_rx). */
3339 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3340 int_params.irq_source = dc_link->irq_source_hpd_rx;
3342 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3344 (void *) aconnector);
3346 if (adev->dm.hpd_rx_offload_wq)
3347 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3353 #if defined(CONFIG_DRM_AMD_DC_SI)
3354 /* Register IRQ sources and initialize IRQ callbacks */
3355 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3357 struct dc *dc = adev->dm.dc;
3358 struct common_irq_params *c_irq_params;
3359 struct dc_interrupt_params int_params = {0};
3362 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3364 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3365 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3368 * Actions of amdgpu_irq_add_id():
3369 * 1. Register a set() function with base driver.
3370 * Base driver will call set() function to enable/disable an
3371 * interrupt in DC hardware.
3372 * 2. Register amdgpu_dm_irq_handler().
3373 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3374 * coming from DC hardware.
3375 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3376 * for acknowledging and handling. */
3378 /* Use VBLANK interrupt */
3379 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3380 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3382 DRM_ERROR("Failed to add crtc irq id!\n");
3386 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3387 int_params.irq_source =
3388 dc_interrupt_to_irq_source(dc, i + 1, 0);
3390 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3392 c_irq_params->adev = adev;
3393 c_irq_params->irq_src = int_params.irq_source;
3395 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3396 dm_crtc_high_irq, c_irq_params);
3399 /* Use GRPH_PFLIP interrupt */
3400 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3401 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3402 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3404 DRM_ERROR("Failed to add page flip irq id!\n");
3408 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3409 int_params.irq_source =
3410 dc_interrupt_to_irq_source(dc, i, 0);
3412 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3414 c_irq_params->adev = adev;
3415 c_irq_params->irq_src = int_params.irq_source;
3417 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3418 dm_pflip_high_irq, c_irq_params);
3423 r = amdgpu_irq_add_id(adev, client_id,
3424 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3426 DRM_ERROR("Failed to add hpd irq id!\n");
3430 register_hpd_handlers(adev);
3436 /* Register IRQ sources and initialize IRQ callbacks */
3437 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3439 struct dc *dc = adev->dm.dc;
3440 struct common_irq_params *c_irq_params;
3441 struct dc_interrupt_params int_params = {0};
3444 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3446 if (adev->family >= AMDGPU_FAMILY_AI)
3447 client_id = SOC15_IH_CLIENTID_DCE;
3449 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3450 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3453 * Actions of amdgpu_irq_add_id():
3454 * 1. Register a set() function with base driver.
3455 * Base driver will call set() function to enable/disable an
3456 * interrupt in DC hardware.
3457 * 2. Register amdgpu_dm_irq_handler().
3458 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3459 * coming from DC hardware.
3460 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3461 * for acknowledging and handling. */
3463 /* Use VBLANK interrupt */
3464 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3465 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3467 DRM_ERROR("Failed to add crtc irq id!\n");
3471 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3472 int_params.irq_source =
3473 dc_interrupt_to_irq_source(dc, i, 0);
3475 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3477 c_irq_params->adev = adev;
3478 c_irq_params->irq_src = int_params.irq_source;
3480 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3481 dm_crtc_high_irq, c_irq_params);
3484 /* Use VUPDATE interrupt */
3485 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3486 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3488 DRM_ERROR("Failed to add vupdate irq id!\n");
3492 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3493 int_params.irq_source =
3494 dc_interrupt_to_irq_source(dc, i, 0);
3496 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3498 c_irq_params->adev = adev;
3499 c_irq_params->irq_src = int_params.irq_source;
3501 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3502 dm_vupdate_high_irq, c_irq_params);
3505 /* Use GRPH_PFLIP interrupt */
3506 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3507 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3508 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3510 DRM_ERROR("Failed to add page flip irq id!\n");
3514 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3515 int_params.irq_source =
3516 dc_interrupt_to_irq_source(dc, i, 0);
3518 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3520 c_irq_params->adev = adev;
3521 c_irq_params->irq_src = int_params.irq_source;
3523 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3524 dm_pflip_high_irq, c_irq_params);
3529 r = amdgpu_irq_add_id(adev, client_id,
3530 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3532 DRM_ERROR("Failed to add hpd irq id!\n");
3536 register_hpd_handlers(adev);
3541 /* Register IRQ sources and initialize IRQ callbacks */
3542 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3544 struct dc *dc = adev->dm.dc;
3545 struct common_irq_params *c_irq_params;
3546 struct dc_interrupt_params int_params = {0};
3549 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3550 static const unsigned int vrtl_int_srcid[] = {
3551 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3552 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3553 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3554 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3555 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3556 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3560 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3561 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3564 * Actions of amdgpu_irq_add_id():
3565 * 1. Register a set() function with base driver.
3566 * Base driver will call set() function to enable/disable an
3567 * interrupt in DC hardware.
3568 * 2. Register amdgpu_dm_irq_handler().
3569 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3570 * coming from DC hardware.
3571 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3572 * for acknowledging and handling.
3575 /* Use VSTARTUP interrupt */
3576 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3577 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3579 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3582 DRM_ERROR("Failed to add crtc irq id!\n");
3586 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3587 int_params.irq_source =
3588 dc_interrupt_to_irq_source(dc, i, 0);
3590 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3592 c_irq_params->adev = adev;
3593 c_irq_params->irq_src = int_params.irq_source;
3595 amdgpu_dm_irq_register_interrupt(
3596 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3599 /* Use otg vertical line interrupt */
3600 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3601 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3602 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3603 vrtl_int_srcid[i], &adev->vline0_irq);
3606 DRM_ERROR("Failed to add vline0 irq id!\n");
3610 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3611 int_params.irq_source =
3612 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3614 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3615 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3619 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3620 - DC_IRQ_SOURCE_DC1_VLINE0];
3622 c_irq_params->adev = adev;
3623 c_irq_params->irq_src = int_params.irq_source;
3625 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3626 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3630 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3631 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3632 * to trigger at end of each vblank, regardless of state of the lock,
3633 * matching DCE behaviour.
3635 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3636 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3638 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3641 DRM_ERROR("Failed to add vupdate irq id!\n");
3645 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3646 int_params.irq_source =
3647 dc_interrupt_to_irq_source(dc, i, 0);
3649 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3651 c_irq_params->adev = adev;
3652 c_irq_params->irq_src = int_params.irq_source;
3654 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3655 dm_vupdate_high_irq, c_irq_params);
3658 /* Use GRPH_PFLIP interrupt */
3659 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3660 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3662 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3664 DRM_ERROR("Failed to add page flip irq id!\n");
3668 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3669 int_params.irq_source =
3670 dc_interrupt_to_irq_source(dc, i, 0);
3672 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3674 c_irq_params->adev = adev;
3675 c_irq_params->irq_src = int_params.irq_source;
3677 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3678 dm_pflip_high_irq, c_irq_params);
3683 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3686 DRM_ERROR("Failed to add hpd irq id!\n");
3690 register_hpd_handlers(adev);
3694 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3695 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3697 struct dc *dc = adev->dm.dc;
3698 struct common_irq_params *c_irq_params;
3699 struct dc_interrupt_params int_params = {0};
3702 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3703 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3705 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3706 &adev->dmub_outbox_irq);
3708 DRM_ERROR("Failed to add outbox irq id!\n");
3712 if (dc->ctx->dmub_srv) {
3713 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3714 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3715 int_params.irq_source =
3716 dc_interrupt_to_irq_source(dc, i, 0);
3718 c_irq_params = &adev->dm.dmub_outbox_params[0];
3720 c_irq_params->adev = adev;
3721 c_irq_params->irq_src = int_params.irq_source;
3723 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3724 dm_dmub_outbox1_low_irq, c_irq_params);
3731 * Acquires the lock for the atomic state object and returns
3732 * the new atomic state.
3734 * This should only be called during atomic check.
3736 int dm_atomic_get_state(struct drm_atomic_state *state,
3737 struct dm_atomic_state **dm_state)
3739 struct drm_device *dev = state->dev;
3740 struct amdgpu_device *adev = drm_to_adev(dev);
3741 struct amdgpu_display_manager *dm = &adev->dm;
3742 struct drm_private_state *priv_state;
3747 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3748 if (IS_ERR(priv_state))
3749 return PTR_ERR(priv_state);
3751 *dm_state = to_dm_atomic_state(priv_state);
3756 static struct dm_atomic_state *
3757 dm_atomic_get_new_state(struct drm_atomic_state *state)
3759 struct drm_device *dev = state->dev;
3760 struct amdgpu_device *adev = drm_to_adev(dev);
3761 struct amdgpu_display_manager *dm = &adev->dm;
3762 struct drm_private_obj *obj;
3763 struct drm_private_state *new_obj_state;
3766 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3767 if (obj->funcs == dm->atomic_obj.funcs)
3768 return to_dm_atomic_state(new_obj_state);
3774 static struct drm_private_state *
3775 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3777 struct dm_atomic_state *old_state, *new_state;
3779 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3783 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3785 old_state = to_dm_atomic_state(obj->state);
3787 if (old_state && old_state->context)
3788 new_state->context = dc_copy_state(old_state->context);
3790 if (!new_state->context) {
3795 return &new_state->base;
3798 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3799 struct drm_private_state *state)
3801 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3803 if (dm_state && dm_state->context)
3804 dc_release_state(dm_state->context);
3809 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3810 .atomic_duplicate_state = dm_atomic_duplicate_state,
3811 .atomic_destroy_state = dm_atomic_destroy_state,
3814 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3816 struct dm_atomic_state *state;
3819 adev->mode_info.mode_config_initialized = true;
3821 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3822 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3824 adev_to_drm(adev)->mode_config.max_width = 16384;
3825 adev_to_drm(adev)->mode_config.max_height = 16384;
3827 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3828 if (adev->asic_type == CHIP_HAWAII)
3829 /* disable prefer shadow for now due to hibernation issues */
3830 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3832 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3833 /* indicates support for immediate flip */
3834 adev_to_drm(adev)->mode_config.async_page_flip = true;
3836 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3838 state = kzalloc(sizeof(*state), GFP_KERNEL);
3842 state->context = dc_create_state(adev->dm.dc);
3843 if (!state->context) {
3848 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3850 drm_atomic_private_obj_init(adev_to_drm(adev),
3851 &adev->dm.atomic_obj,
3853 &dm_atomic_state_funcs);
3855 r = amdgpu_display_modeset_create_props(adev);
3857 dc_release_state(state->context);
3862 r = amdgpu_dm_audio_init(adev);
3864 dc_release_state(state->context);
3872 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3873 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3874 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3876 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3879 #if defined(CONFIG_ACPI)
3880 struct amdgpu_dm_backlight_caps caps;
3882 memset(&caps, 0, sizeof(caps));
3884 if (dm->backlight_caps[bl_idx].caps_valid)
3887 amdgpu_acpi_get_backlight_caps(&caps);
3888 if (caps.caps_valid) {
3889 dm->backlight_caps[bl_idx].caps_valid = true;
3890 if (caps.aux_support)
3892 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3893 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3895 dm->backlight_caps[bl_idx].min_input_signal =
3896 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3897 dm->backlight_caps[bl_idx].max_input_signal =
3898 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3901 if (dm->backlight_caps[bl_idx].aux_support)
3904 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3905 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3909 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3910 unsigned *min, unsigned *max)
3915 if (caps->aux_support) {
3916 // Firmware limits are in nits, DC API wants millinits.
3917 *max = 1000 * caps->aux_max_input_signal;
3918 *min = 1000 * caps->aux_min_input_signal;
3920 // Firmware limits are 8-bit, PWM control is 16-bit.
3921 *max = 0x101 * caps->max_input_signal;
3922 *min = 0x101 * caps->min_input_signal;
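/* 0x101 scales the 8-bit limits exactly onto 16 bits: 0x101 * 0xFF == 0xFFFF. */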
3927 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3928 uint32_t brightness)
3932 if (!get_brightness_range(caps, &min, &max))
3935 // Rescale 0..255 to min..max
3936 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3937 AMDGPU_MAX_BL_LEVEL);
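/*
 * Worked example for the PWM path (assuming the ACPI defaults above,
 * min_input_signal = 12, max_input_signal = 255, and
 * AMDGPU_MAX_BL_LEVEL = 255): min = 0x101 * 12 = 3084, max = 0xFFFF,
 * so user brightness 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */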
3940 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3941 uint32_t brightness)
3945 if (!get_brightness_range(caps, &min, &max))
3948 if (brightness < min)
3950 // Rescale min..max to 0..255
3951 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3955 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3957 u32 user_brightness)
3959 struct amdgpu_dm_backlight_caps caps;
3960 struct dc_link *link;
3964 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3965 caps = dm->backlight_caps[bl_idx];
3967 dm->brightness[bl_idx] = user_brightness;
3968 /* update scratch register */
3970 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3971 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3972 link = (struct dc_link *)dm->backlight_link[bl_idx];
3974 /* Change brightness based on AUX property */
3975 if (caps.aux_support) {
3976 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3977 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3979 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3981 rc = dc_link_set_backlight_level(link, brightness, 0);
3983 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3987 dm->actual_brightness[bl_idx] = user_brightness;
3990 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3992 struct amdgpu_display_manager *dm = bl_get_data(bd);
3995 for (i = 0; i < dm->num_of_edps; i++) {
3996 if (bd == dm->backlight_dev[i])
3999 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4001 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4006 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4009 struct amdgpu_dm_backlight_caps caps;
4010 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4012 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4013 caps = dm->backlight_caps[bl_idx];
4015 if (caps.aux_support) {
4019 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4021 return dm->brightness[bl_idx];
4022 return convert_brightness_to_user(&caps, avg);
4024 int ret = dc_link_get_backlight_level(link);
4026 if (ret == DC_ERROR_UNEXPECTED)
4027 return dm->brightness[bl_idx];
4028 return convert_brightness_to_user(&caps, ret);
4032 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4034 struct amdgpu_display_manager *dm = bl_get_data(bd);
4037 for (i = 0; i < dm->num_of_edps; i++) {
4038 if (bd == dm->backlight_dev[i])
4041 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4043 return amdgpu_dm_backlight_get_level(dm, i);
4046 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4047 .options = BL_CORE_SUSPENDRESUME,
4048 .get_brightness = amdgpu_dm_backlight_get_brightness,
4049 .update_status = amdgpu_dm_backlight_update_status,
4053 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4056 struct backlight_properties props = { 0 };
4058 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4059 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4061 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4062 props.brightness = AMDGPU_MAX_BL_LEVEL;
4063 props.type = BACKLIGHT_RAW;
4065 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4066 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4068 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4069 adev_to_drm(dm->adev)->dev,
4071 &amdgpu_dm_backlight_ops,
4074 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4075 DRM_ERROR("DM: Backlight registration failed!\n");
4077 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4080 static int initialize_plane(struct amdgpu_display_manager *dm,
4081 struct amdgpu_mode_info *mode_info, int plane_id,
4082 enum drm_plane_type plane_type,
4083 const struct dc_plane_cap *plane_cap)
4085 struct drm_plane *plane;
4086 unsigned long possible_crtcs;
4089 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4091 DRM_ERROR("KMS: Failed to allocate plane\n");
4094 plane->type = plane_type;
4097 * HACK: IGT tests expect that the primary plane for a CRTC
4098 * can only have one possible CRTC. Only expose support for
4099 * any CRTC if they're not going to be used as a primary plane
4100 * for a CRTC - like overlay or underlay planes.
4102 possible_crtcs = 1 << plane_id;
4103 if (plane_id >= dm->dc->caps.max_streams)
4104 possible_crtcs = 0xff;
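/*
 * Worked example: with max_streams == 4, primary planes 0-3 get
 * possible_crtcs masks 0x1, 0x2, 0x4 and 0x8 (one CRTC each), while
 * overlay/underlay plane ids >= 4 advertise 0xff (any CRTC).
 */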
4106 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4109 DRM_ERROR("KMS: Failed to initialize plane\n");
4115 mode_info->planes[plane_id] = plane;
4121 static void register_backlight_device(struct amdgpu_display_manager *dm,
4122 struct dc_link *link)
4124 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4125 link->type != dc_connection_none) {
4127 * Even if registration fails, we should continue with
4128 * DM initialization because not having a backlight control
4129 * is better than a black screen.
4131 if (!dm->backlight_dev[dm->num_of_edps])
4132 amdgpu_dm_register_backlight_device(dm);
4134 if (dm->backlight_dev[dm->num_of_edps]) {
4135 dm->backlight_link[dm->num_of_edps] = link;
4141 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4144 * In this architecture, the association
4145 * connector -> encoder -> crtc
4146 * is not really required. The crtc and connector will hold the
4147 * display_index as an abstraction to use with the DAL component.
4149 * Returns 0 on success
4151 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4153 struct amdgpu_display_manager *dm = &adev->dm;
4155 struct amdgpu_dm_connector *aconnector = NULL;
4156 struct amdgpu_encoder *aencoder = NULL;
4157 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4159 int32_t primary_planes;
4160 enum dc_connection_type new_connection_type = dc_connection_none;
4161 const struct dc_plane_cap *plane;
4162 bool psr_feature_enabled = false;
4164 dm->display_indexes_num = dm->dc->caps.max_streams;
4165 /* Update the actual number of CRTCs in use */
4166 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4168 link_cnt = dm->dc->caps.max_links;
4169 if (amdgpu_dm_mode_config_init(dm->adev)) {
4170 DRM_ERROR("DM: Failed to initialize mode config\n");
4174 /* There is one primary plane per CRTC */
4175 primary_planes = dm->dc->caps.max_streams;
4176 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4179 * Initialize primary planes, implicit planes for legacy IOCTLS.
4180 * Order is reversed to match iteration order in atomic check.
4182 for (i = (primary_planes - 1); i >= 0; i--) {
4183 plane = &dm->dc->caps.planes[i];
4185 if (initialize_plane(dm, mode_info, i,
4186 DRM_PLANE_TYPE_PRIMARY, plane)) {
4187 DRM_ERROR("KMS: Failed to initialize primary plane\n");
4193 * Initialize overlay planes, index starting after primary planes.
4194 * These planes have a higher DRM index than the primary planes since
4195 * they should be considered as having a higher z-order.
4196 * Order is reversed to match iteration order in atomic check.
4198 * Only support DCN for now, and only expose one so we don't encourage
4199 * userspace to use up all the pipes.
4201 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4202 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4204 /* Do not create overlay if MPO disabled */
4205 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4208 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4211 if (!plane->blends_with_above || !plane->blends_with_below)
4214 if (!plane->pixel_format_support.argb8888)
4217 if (initialize_plane(dm, NULL, primary_planes + i,
4218 DRM_PLANE_TYPE_OVERLAY, plane)) {
4219 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4223 /* Only create one overlay plane. */
4227 for (i = 0; i < dm->dc->caps.max_streams; i++)
4228 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4229 DRM_ERROR("KMS: Failed to initialize crtc\n");
4233 /* Use Outbox interrupt */
4234 switch (adev->ip_versions[DCE_HWIP][0]) {
4235 case IP_VERSION(3, 0, 0):
4236 case IP_VERSION(3, 1, 2):
4237 case IP_VERSION(3, 1, 3):
4238 case IP_VERSION(3, 1, 4):
4239 case IP_VERSION(3, 1, 5):
4240 case IP_VERSION(3, 1, 6):
4241 case IP_VERSION(3, 2, 0):
4242 case IP_VERSION(3, 2, 1):
4243 case IP_VERSION(2, 1, 0):
4244 if (register_outbox_irq_handlers(dm->adev)) {
4245 DRM_ERROR("DM: Failed to initialize IRQ\n");
4250 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4251 adev->ip_versions[DCE_HWIP][0]);
4254 /* Determine whether to enable PSR support by default. */
4255 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4256 switch (adev->ip_versions[DCE_HWIP][0]) {
4257 case IP_VERSION(3, 1, 2):
4258 case IP_VERSION(3, 1, 3):
4259 case IP_VERSION(3, 1, 4):
4260 case IP_VERSION(3, 1, 5):
4261 case IP_VERSION(3, 1, 6):
4262 case IP_VERSION(3, 2, 0):
4263 case IP_VERSION(3, 2, 1):
4264 psr_feature_enabled = true;
4267 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4272 /* loops over all connectors on the board */
4273 for (i = 0; i < link_cnt; i++) {
4274 struct dc_link *link = NULL;
4276 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4278 "KMS: Cannot support more than %d display indexes\n",
4279 AMDGPU_DM_MAX_DISPLAY_INDEX);
4283 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4287 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4291 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4292 DRM_ERROR("KMS: Failed to initialize encoder\n");
4296 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4297 DRM_ERROR("KMS: Failed to initialize connector\n");
4301 link = dc_get_link_at_index(dm->dc, i);
4303 if (!dc_link_detect_sink(link, &new_connection_type))
4304 DRM_ERROR("KMS: Failed to detect connector\n");
4306 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4307 emulated_link_detect(link);
4308 amdgpu_dm_update_connector_after_detect(aconnector);
4312 mutex_lock(&dm->dc_lock);
4313 ret = dc_link_detect(link, DETECT_REASON_BOOT);
4314 mutex_unlock(&dm->dc_lock);
4317 amdgpu_dm_update_connector_after_detect(aconnector);
4318 register_backlight_device(dm, link);
4320 if (dm->num_of_edps)
4321 update_connector_ext_caps(aconnector);
4323 if (psr_feature_enabled)
4324 amdgpu_dm_set_psr_caps(link);
4326 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4327 * PSR is also supported.
4329 if (link->psr_settings.psr_feature_enabled)
4330 adev_to_drm(adev)->vblank_disable_immediate = false;
4333 amdgpu_set_panel_orientation(&aconnector->base);
4336 /* Software is initialized. Now we can register interrupt handlers. */
4337 switch (adev->asic_type) {
4338 #if defined(CONFIG_DRM_AMD_DC_SI)
4343 if (dce60_register_irq_handlers(dm->adev)) {
4344 DRM_ERROR("DM: Failed to initialize IRQ\n");
4358 case CHIP_POLARIS11:
4359 case CHIP_POLARIS10:
4360 case CHIP_POLARIS12:
4365 if (dce110_register_irq_handlers(dm->adev)) {
4366 DRM_ERROR("DM: Failed to initialize IRQ\n");
4371 switch (adev->ip_versions[DCE_HWIP][0]) {
4372 case IP_VERSION(1, 0, 0):
4373 case IP_VERSION(1, 0, 1):
4374 case IP_VERSION(2, 0, 2):
4375 case IP_VERSION(2, 0, 3):
4376 case IP_VERSION(2, 0, 0):
4377 case IP_VERSION(2, 1, 0):
4378 case IP_VERSION(3, 0, 0):
4379 case IP_VERSION(3, 0, 2):
4380 case IP_VERSION(3, 0, 3):
4381 case IP_VERSION(3, 0, 1):
4382 case IP_VERSION(3, 1, 2):
4383 case IP_VERSION(3, 1, 3):
4384 case IP_VERSION(3, 1, 4):
4385 case IP_VERSION(3, 1, 5):
4386 case IP_VERSION(3, 1, 6):
4387 case IP_VERSION(3, 2, 0):
4388 case IP_VERSION(3, 2, 1):
4389 if (dcn10_register_irq_handlers(dm->adev)) {
4390 DRM_ERROR("DM: Failed to initialize IRQ\n");
4395 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4396 adev->ip_versions[DCE_HWIP][0]);
4410 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4412 drm_atomic_private_obj_fini(&dm->atomic_obj);
4416 /******************************************************************************
4417 * amdgpu_display_funcs functions
4418 *****************************************************************************/
4421 * dm_bandwidth_update - program display watermarks
4423 * @adev: amdgpu_device pointer
4425 * Calculate and program the display watermarks and line buffer allocation.
4427 static void dm_bandwidth_update(struct amdgpu_device *adev)
4429 /* TODO: implement later */
4432 static const struct amdgpu_display_funcs dm_display_funcs = {
4433 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4434 .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4435 .backlight_set_level = NULL, /* never called for DC */
4436 .backlight_get_level = NULL, /* never called for DC */
4437 .hpd_sense = NULL, /* called unconditionally */
4438 .hpd_set_polarity = NULL, /* called unconditionally */
4439 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4440 .page_flip_get_scanoutpos =
4441 dm_crtc_get_scanoutpos,/* called unconditionally */
4442 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4443 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4446 #if defined(CONFIG_DEBUG_KERNEL_DC)
4448 static ssize_t s3_debug_store(struct device *device,
4449 struct device_attribute *attr,
4455 struct drm_device *drm_dev = dev_get_drvdata(device);
4456 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4458 ret = kstrtoint(buf, 0, &s3_state);
4463 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4468 return ret == 0 ? count : 0;
4471 DEVICE_ATTR_WO(s3_debug);
4475 static int dm_early_init(void *handle)
4477 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4479 switch (adev->asic_type) {
4480 #if defined(CONFIG_DRM_AMD_DC_SI)
4484 adev->mode_info.num_crtc = 6;
4485 adev->mode_info.num_hpd = 6;
4486 adev->mode_info.num_dig = 6;
4489 adev->mode_info.num_crtc = 2;
4490 adev->mode_info.num_hpd = 2;
4491 adev->mode_info.num_dig = 2;
4496 adev->mode_info.num_crtc = 6;
4497 adev->mode_info.num_hpd = 6;
4498 adev->mode_info.num_dig = 6;
4501 adev->mode_info.num_crtc = 4;
4502 adev->mode_info.num_hpd = 6;
4503 adev->mode_info.num_dig = 7;
4507 adev->mode_info.num_crtc = 2;
4508 adev->mode_info.num_hpd = 6;
4509 adev->mode_info.num_dig = 6;
4513 adev->mode_info.num_crtc = 6;
4514 adev->mode_info.num_hpd = 6;
4515 adev->mode_info.num_dig = 7;
4518 adev->mode_info.num_crtc = 3;
4519 adev->mode_info.num_hpd = 6;
4520 adev->mode_info.num_dig = 9;
4523 adev->mode_info.num_crtc = 2;
4524 adev->mode_info.num_hpd = 6;
4525 adev->mode_info.num_dig = 9;
4527 case CHIP_POLARIS11:
4528 case CHIP_POLARIS12:
4529 adev->mode_info.num_crtc = 5;
4530 adev->mode_info.num_hpd = 5;
4531 adev->mode_info.num_dig = 5;
4533 case CHIP_POLARIS10:
4535 adev->mode_info.num_crtc = 6;
4536 adev->mode_info.num_hpd = 6;
4537 adev->mode_info.num_dig = 6;
4542 adev->mode_info.num_crtc = 6;
4543 adev->mode_info.num_hpd = 6;
4544 adev->mode_info.num_dig = 6;
4548 switch (adev->ip_versions[DCE_HWIP][0]) {
4549 case IP_VERSION(2, 0, 2):
4550 case IP_VERSION(3, 0, 0):
4551 adev->mode_info.num_crtc = 6;
4552 adev->mode_info.num_hpd = 6;
4553 adev->mode_info.num_dig = 6;
4555 case IP_VERSION(2, 0, 0):
4556 case IP_VERSION(3, 0, 2):
4557 adev->mode_info.num_crtc = 5;
4558 adev->mode_info.num_hpd = 5;
4559 adev->mode_info.num_dig = 5;
4561 case IP_VERSION(2, 0, 3):
4562 case IP_VERSION(3, 0, 3):
4563 adev->mode_info.num_crtc = 2;
4564 adev->mode_info.num_hpd = 2;
4565 adev->mode_info.num_dig = 2;
4567 case IP_VERSION(1, 0, 0):
4568 case IP_VERSION(1, 0, 1):
4569 case IP_VERSION(3, 0, 1):
4570 case IP_VERSION(2, 1, 0):
4571 case IP_VERSION(3, 1, 2):
4572 case IP_VERSION(3, 1, 3):
4573 case IP_VERSION(3, 1, 4):
4574 case IP_VERSION(3, 1, 5):
4575 case IP_VERSION(3, 1, 6):
4576 case IP_VERSION(3, 2, 0):
4577 case IP_VERSION(3, 2, 1):
4578 adev->mode_info.num_crtc = 4;
4579 adev->mode_info.num_hpd = 4;
4580 adev->mode_info.num_dig = 4;
4583 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4584 adev->ip_versions[DCE_HWIP][0]);
4590 amdgpu_dm_set_irq_funcs(adev);
4592 if (adev->mode_info.funcs == NULL)
4593 adev->mode_info.funcs = &dm_display_funcs;
4596 * Note: Do NOT change adev->audio_endpt_rreg and
4597 * adev->audio_endpt_wreg because they are initialised in
4598 * amdgpu_device_init()
4600 #if defined(CONFIG_DEBUG_KERNEL_DC)
4602 adev_to_drm(adev)->dev,
4603 &dev_attr_s3_debug);
4609 static bool modereset_required(struct drm_crtc_state *crtc_state)
4611 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4614 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4616 drm_encoder_cleanup(encoder);
4620 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4621 .destroy = amdgpu_dm_encoder_destroy,
4625 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4626 const enum surface_pixel_format format,
4627 enum dc_color_space *color_space)
4631 *color_space = COLOR_SPACE_SRGB;
4633 /* DRM color properties only affect non-RGB formats. */
4634 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4637 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
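/*
 * The switch below maps the DRM (encoding, range) pair onto a DC color
 * space, e.g. DRM_COLOR_YCBCR_BT709 with limited range becomes
 * COLOR_SPACE_YCBCR709_LIMITED.
 */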
4639 switch (plane_state->color_encoding) {
4640 case DRM_COLOR_YCBCR_BT601:
4642 *color_space = COLOR_SPACE_YCBCR601;
4644 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4647 case DRM_COLOR_YCBCR_BT709:
4649 *color_space = COLOR_SPACE_YCBCR709;
4651 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4654 case DRM_COLOR_YCBCR_BT2020:
4656 *color_space = COLOR_SPACE_2020_YCBCR;
4669 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4670 const struct drm_plane_state *plane_state,
4671 const uint64_t tiling_flags,
4672 struct dc_plane_info *plane_info,
4673 struct dc_plane_address *address,
4675 bool force_disable_dcc)
4677 const struct drm_framebuffer *fb = plane_state->fb;
4678 const struct amdgpu_framebuffer *afb =
4679 to_amdgpu_framebuffer(plane_state->fb);
4682 memset(plane_info, 0, sizeof(*plane_info));
4684 switch (fb->format->format) {
4686 plane_info->format =
4687 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4689 case DRM_FORMAT_RGB565:
4690 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4692 case DRM_FORMAT_XRGB8888:
4693 case DRM_FORMAT_ARGB8888:
4694 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4696 case DRM_FORMAT_XRGB2101010:
4697 case DRM_FORMAT_ARGB2101010:
4698 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4700 case DRM_FORMAT_XBGR2101010:
4701 case DRM_FORMAT_ABGR2101010:
4702 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4704 case DRM_FORMAT_XBGR8888:
4705 case DRM_FORMAT_ABGR8888:
4706 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4708 case DRM_FORMAT_NV21:
4709 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4711 case DRM_FORMAT_NV12:
4712 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4714 case DRM_FORMAT_P010:
4715 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4717 case DRM_FORMAT_XRGB16161616F:
4718 case DRM_FORMAT_ARGB16161616F:
4719 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4721 case DRM_FORMAT_XBGR16161616F:
4722 case DRM_FORMAT_ABGR16161616F:
4723 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4725 case DRM_FORMAT_XRGB16161616:
4726 case DRM_FORMAT_ARGB16161616:
4727 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4729 case DRM_FORMAT_XBGR16161616:
4730 case DRM_FORMAT_ABGR16161616:
4731 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4735 "Unsupported screen format %p4cc\n",
4736 &fb->format->format);
4740 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4741 case DRM_MODE_ROTATE_0:
4742 plane_info->rotation = ROTATION_ANGLE_0;
4744 case DRM_MODE_ROTATE_90:
4745 plane_info->rotation = ROTATION_ANGLE_90;
4747 case DRM_MODE_ROTATE_180:
4748 plane_info->rotation = ROTATION_ANGLE_180;
4750 case DRM_MODE_ROTATE_270:
4751 plane_info->rotation = ROTATION_ANGLE_270;
4754 plane_info->rotation = ROTATION_ANGLE_0;
4759 plane_info->visible = true;
4760 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4762 plane_info->layer_index = 0;
4764 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4765 &plane_info->color_space);
4769 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4770 plane_info->rotation, tiling_flags,
4771 &plane_info->tiling_info,
4772 &plane_info->plane_size,
4773 &plane_info->dcc, address,
4774 tmz_surface, force_disable_dcc);
4778 fill_blending_from_plane_state(
4779 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
4780 &plane_info->global_alpha, &plane_info->global_alpha_value);
4785 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4786 struct dc_plane_state *dc_plane_state,
4787 struct drm_plane_state *plane_state,
4788 struct drm_crtc_state *crtc_state)
4790 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4791 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4792 struct dc_scaling_info scaling_info;
4793 struct dc_plane_info plane_info;
4795 bool force_disable_dcc = false;
4797 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
4801 dc_plane_state->src_rect = scaling_info.src_rect;
4802 dc_plane_state->dst_rect = scaling_info.dst_rect;
4803 dc_plane_state->clip_rect = scaling_info.clip_rect;
4804 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4806 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4807 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4810 &dc_plane_state->address,
4816 dc_plane_state->format = plane_info.format;
4817 dc_plane_state->color_space = plane_info.color_space;
4819 dc_plane_state->plane_size = plane_info.plane_size;
4820 dc_plane_state->rotation = plane_info.rotation;
4821 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4822 dc_plane_state->stereo_format = plane_info.stereo_format;
4823 dc_plane_state->tiling_info = plane_info.tiling_info;
4824 dc_plane_state->visible = plane_info.visible;
4825 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4826 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
4827 dc_plane_state->global_alpha = plane_info.global_alpha;
4828 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4829 dc_plane_state->dcc = plane_info.dcc;
4830 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4831 dc_plane_state->flip_int_enabled = true;
4834 * Always set input transfer function, since plane state is refreshed
4837 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4845 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
4847 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
4849 * @old_plane_state: Old state of @plane
4850 * @new_plane_state: New state of @plane
4851 * @crtc_state: New state of CRTC connected to the @plane
4852 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
4854 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
4855 * (referred to as "damage clips" in DRM nomenclature) that require updating on
4856 * the eDP remote buffer. The responsibility of specifying the dirty regions is
4859 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
4860 * plane with regions that require flushing to the eDP remote buffer. In
4861 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
4862 * implicitly provide damage clips without any client support via the plane
4865 * Today, amdgpu_dm only supports the MPO and cursor use cases.
4867 * TODO: Also enable for FB_DAMAGE_CLIPS
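*
* For reference, a damage-aware client would hand its clips to the kernel
* roughly as in the hypothetical libdrm sketch below; rects, req, plane_id
* and fb_damage_clips_prop are illustrative placeholders, not driver API:
*
*   struct drm_mode_rect rects[] = { { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 } };
*   uint32_t blob_id;
*
*   drmModeCreatePropertyBlob(fd, rects, sizeof(rects), &blob_id);
*   drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop, blob_id);
*   drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);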
4869 static void fill_dc_dirty_rects(struct drm_plane *plane,
4870 struct drm_plane_state *old_plane_state,
4871 struct drm_plane_state *new_plane_state,
4872 struct drm_crtc_state *crtc_state,
4873 struct dc_flip_addrs *flip_addrs)
4875 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4876 struct rect *dirty_rects = flip_addrs->dirty_rects;
4882 flip_addrs->dirty_rect_count = 0;
4885 * Cursor plane has its own dirty rect update interface. See
4886 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
4888 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4892 * Today, we only consider the MPO use case for PSR SU. If MPO is not
4893 * requested and there is a plane update, do a full-frame update (FFU).
4895 if (!dm_crtc_state->mpo_requested) {
4896 dirty_rects[0].x = 0;
4897 dirty_rects[0].y = 0;
4898 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
4899 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
4900 flip_addrs->dirty_rect_count = 1;
4901 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
4902 new_plane_state->plane->base.id,
4903 dm_crtc_state->base.mode.crtc_hdisplay,
4904 dm_crtc_state->base.mode.crtc_vdisplay);
4909 * MPO is requested. Add entire plane bounding box to dirty rects if
4910 * flipped to or damaged.
4912 * If plane is moved or resized, also add old bounding box to dirty
4915 num_clips = drm_plane_get_damage_clips_count(new_plane_state);
4916 fb_changed = old_plane_state->fb->base.id !=
4917 new_plane_state->fb->base.id;
4918 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
4919 old_plane_state->crtc_y != new_plane_state->crtc_y ||
4920 old_plane_state->crtc_w != new_plane_state->crtc_w ||
4921 old_plane_state->crtc_h != new_plane_state->crtc_h);
4923 DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
4924 new_plane_state->plane->base.id,
4925 bb_changed, fb_changed, num_clips);
4927 if (num_clips || fb_changed || bb_changed) {
4928 dirty_rects[i].x = new_plane_state->crtc_x;
4929 dirty_rects[i].y = new_plane_state->crtc_y;
4930 dirty_rects[i].width = new_plane_state->crtc_w;
4931 dirty_rects[i].height = new_plane_state->crtc_h;
4932 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
4933 new_plane_state->plane->base.id,
4934 dirty_rects[i].x, dirty_rects[i].y,
4935 dirty_rects[i].width, dirty_rects[i].height);
4939 /* Add old plane bounding-box if plane is moved or resized */
4941 dirty_rects[i].x = old_plane_state->crtc_x;
4942 dirty_rects[i].y = old_plane_state->crtc_y;
4943 dirty_rects[i].width = old_plane_state->crtc_w;
4944 dirty_rects[i].height = old_plane_state->crtc_h;
4945 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
4946 old_plane_state->plane->base.id,
4947 dirty_rects[i].x, dirty_rects[i].y,
4948 dirty_rects[i].width, dirty_rects[i].height);
4952 flip_addrs->dirty_rect_count = i;
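/*
 * Example: an MPO plane that both flipped and moved contributes two
 * rects (new and old bounding box), so dirty_rect_count ends up 2; a
 * page flip in place contributes only the new bounding box (count 1).
 */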
4955 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4956 const struct dm_connector_state *dm_state,
4957 struct dc_stream_state *stream)
4959 enum amdgpu_rmx_type rmx_type;
4961 struct rect src = { 0 }; /* viewport in composition space */
4962 struct rect dst = { 0 }; /* stream addressable area */
4964 /* no mode. nothing to be done */
4968 /* Full screen scaling by default */
4969 src.width = mode->hdisplay;
4970 src.height = mode->vdisplay;
4971 dst.width = stream->timing.h_addressable;
4972 dst.height = stream->timing.v_addressable;
4975 rmx_type = dm_state->scaling;
4976 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4977 if (src.width * dst.height <
4978 src.height * dst.width) {
4979 /* height needs less upscaling/more downscaling */
4980 dst.width = src.width *
4981 dst.height / src.height;
4983 /* width needs less upscaling/more downscaling */
4984 dst.height = src.height *
4985 dst.width / src.width;
4987 } else if (rmx_type == RMX_CENTER) {
4991 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4992 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4994 if (dm_state->underscan_enable) {
4995 dst.x += dm_state->underscan_hborder / 2;
4996 dst.y += dm_state->underscan_vborder / 2;
4997 dst.width -= dm_state->underscan_hborder;
4998 dst.height -= dm_state->underscan_vborder;
5005 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5006 dst.x, dst.y, dst.width, dst.height);
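/*
 * Worked example: a 1280x720 viewport scaled with RMX_ASPECT onto a
 * 1920x1200 panel keeps 16:9, giving dst = 1920x1080 with
 * dst.y = (1200 - 1080) / 2 = 60 to centre the image vertically.
 */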
5010 static enum dc_color_depth
5011 convert_color_depth_from_display_info(const struct drm_connector *connector,
5012 bool is_y420, int requested_bpc)
5019 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5020 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5022 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5024 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5027 bpc = (uint8_t)connector->display_info.bpc;
5028 /* Assume 8 bpc by default if no bpc is specified. */
5029 bpc = bpc ? bpc : 8;
5032 if (requested_bpc > 0) {
5034 * Cap display bpc based on the user requested value.
5036 * The value for state->max_bpc may not be correctly updated
5037 * depending on when the connector gets added to the state
5038 * or if this was called outside of atomic check, so it
5039 * can't be used directly.
5041 bpc = min_t(u8, bpc, requested_bpc);
5043 /* Round down to the nearest even number. */
5044 bpc = bpc - (bpc & 1);
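/*
 * Worked example: a panel reporting 12 bpc with max_requested_bpc == 10
 * yields min(12, 10) = 10, while an odd value such as 11 is rounded
 * down to 10 by clearing the low bit.
 */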
5050 * Temporary workaround: DRM doesn't parse color depth for
5051 * EDID revisions before 1.4
5052 * TODO: Fix EDID parsing
5054 return COLOR_DEPTH_888;
5056 return COLOR_DEPTH_666;
5058 return COLOR_DEPTH_888;
5060 return COLOR_DEPTH_101010;
5062 return COLOR_DEPTH_121212;
5064 return COLOR_DEPTH_141414;
5066 return COLOR_DEPTH_161616;
5068 return COLOR_DEPTH_UNDEFINED;
5072 static enum dc_aspect_ratio
5073 get_aspect_ratio(const struct drm_display_mode *mode_in)
5075 /* 1-1 mapping, since both enums follow the HDMI spec. */
5076 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5079 static enum dc_color_space
5080 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5082 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5084 switch (dc_crtc_timing->pixel_encoding) {
5085 case PIXEL_ENCODING_YCBCR422:
5086 case PIXEL_ENCODING_YCBCR444:
5087 case PIXEL_ENCODING_YCBCR420:
5090 * 27030 kHz (270300 in 100 Hz units) is the separation point between
5091 * HDTV and SDTV per the HDMI spec; use YCbCr709 above and YCbCr601 below
5094 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5095 if (dc_crtc_timing->flags.Y_ONLY)
5097 COLOR_SPACE_YCBCR709_LIMITED;
5099 color_space = COLOR_SPACE_YCBCR709;
5101 if (dc_crtc_timing->flags.Y_ONLY)
5103 COLOR_SPACE_YCBCR601_LIMITED;
5105 color_space = COLOR_SPACE_YCBCR601;
5110 case PIXEL_ENCODING_RGB:
5111 color_space = COLOR_SPACE_SRGB;
5122 static bool adjust_colour_depth_from_display_info(
5123 struct dc_crtc_timing *timing_out,
5124 const struct drm_display_info *info)
5126 enum dc_color_depth depth = timing_out->display_color_depth;
5129 normalized_clk = timing_out->pix_clk_100hz / 10;
5130 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5131 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5132 normalized_clk /= 2;
5133 /* Adjust the pixel clock per the HDMI spec based on colour depth */
5135 case COLOR_DEPTH_888:
5137 case COLOR_DEPTH_101010:
5138 normalized_clk = (normalized_clk * 30) / 24;
5140 case COLOR_DEPTH_121212:
5141 normalized_clk = (normalized_clk * 36) / 24;
5143 case COLOR_DEPTH_161616:
5144 normalized_clk = (normalized_clk * 48) / 24;
5147 /* The above depths are the only ones valid for HDMI. */
5150 if (normalized_clk <= info->max_tmds_clock) {
5151 timing_out->display_color_depth = depth;
5154 } while (--depth > COLOR_DEPTH_666);
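/*
 * Worked example: 4k60 RGB needs ~594000 kHz at 8 bpc; at 10 bpc the
 * normalized clock is 594000 * 30 / 24 = 742500 kHz, so with a sink
 * max_tmds_clock of 600000 kHz the loop steps back down and settles on
 * COLOR_DEPTH_888.
 */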
5158 static void fill_stream_properties_from_drm_display_mode(
5159 struct dc_stream_state *stream,
5160 const struct drm_display_mode *mode_in,
5161 const struct drm_connector *connector,
5162 const struct drm_connector_state *connector_state,
5163 const struct dc_stream_state *old_stream,
5166 struct dc_crtc_timing *timing_out = &stream->timing;
5167 const struct drm_display_info *info = &connector->display_info;
5168 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5169 struct hdmi_vendor_infoframe hv_frame;
5170 struct hdmi_avi_infoframe avi_frame;
5172 memset(&hv_frame, 0, sizeof(hv_frame));
5173 memset(&avi_frame, 0, sizeof(avi_frame));
5175 timing_out->h_border_left = 0;
5176 timing_out->h_border_right = 0;
5177 timing_out->v_border_top = 0;
5178 timing_out->v_border_bottom = 0;
5179 /* TODO: un-hardcode */
5180 if (drm_mode_is_420_only(info, mode_in)
5181 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5182 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5183 else if (drm_mode_is_420_also(info, mode_in)
5184 && aconnector->force_yuv420_output)
5185 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5186 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5187 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5188 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5190 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5192 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5193 timing_out->display_color_depth = convert_color_depth_from_display_info(
5195 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5197 timing_out->scan_type = SCANNING_TYPE_NODATA;
5198 timing_out->hdmi_vic = 0;
5201 timing_out->vic = old_stream->timing.vic;
5202 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5203 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5205 timing_out->vic = drm_match_cea_mode(mode_in);
5206 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5207 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5208 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5209 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5212 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5213 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5214 timing_out->vic = avi_frame.video_code;
5215 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5216 timing_out->hdmi_vic = hv_frame.vic;
5219 if (is_freesync_video_mode(mode_in, aconnector)) {
5220 timing_out->h_addressable = mode_in->hdisplay;
5221 timing_out->h_total = mode_in->htotal;
5222 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5223 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5224 timing_out->v_total = mode_in->vtotal;
5225 timing_out->v_addressable = mode_in->vdisplay;
5226 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5227 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5228 timing_out->pix_clk_100hz = mode_in->clock * 10;
5230 timing_out->h_addressable = mode_in->crtc_hdisplay;
5231 timing_out->h_total = mode_in->crtc_htotal;
5232 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5233 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5234 timing_out->v_total = mode_in->crtc_vtotal;
5235 timing_out->v_addressable = mode_in->crtc_vdisplay;
5236 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5237 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5238 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5241 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5243 stream->output_color_space = get_output_color_space(timing_out);
5245 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5246 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5247 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5248 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5249 drm_mode_is_420_also(info, mode_in) &&
5250 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5251 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5252 adjust_colour_depth_from_display_info(timing_out, info);
5257 static void fill_audio_info(struct audio_info *audio_info,
5258 const struct drm_connector *drm_connector,
5259 const struct dc_sink *dc_sink)
5262 int cea_revision = 0;
5263 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5265 audio_info->manufacture_id = edid_caps->manufacturer_id;
5266 audio_info->product_id = edid_caps->product_id;
5268 cea_revision = drm_connector->display_info.cea_rev;
5270 strscpy(audio_info->display_name,
5271 edid_caps->display_name,
5272 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5274 if (cea_revision >= 3) {
5275 audio_info->mode_count = edid_caps->audio_mode_count;
5277 for (i = 0; i < audio_info->mode_count; ++i) {
5278 audio_info->modes[i].format_code =
5279 (enum audio_format_code)
5280 (edid_caps->audio_modes[i].format_code);
5281 audio_info->modes[i].channel_count =
5282 edid_caps->audio_modes[i].channel_count;
5283 audio_info->modes[i].sample_rates.all =
5284 edid_caps->audio_modes[i].sample_rate;
5285 audio_info->modes[i].sample_size =
5286 edid_caps->audio_modes[i].sample_size;
5290 audio_info->flags.all = edid_caps->speaker_flags;
5292 /* TODO: We only check for progressive mode; check for interlaced mode too */
5293 if (drm_connector->latency_present[0]) {
5294 audio_info->video_latency = drm_connector->video_latency[0];
5295 audio_info->audio_latency = drm_connector->audio_latency[0];
5298 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5303 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5304 struct drm_display_mode *dst_mode)
5306 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5307 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5308 dst_mode->crtc_clock = src_mode->crtc_clock;
5309 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5310 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5311 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5312 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5313 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5314 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5315 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5316 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5317 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5318 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5319 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5323 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5324 const struct drm_display_mode *native_mode,
5327 if (scale_enabled) {
5328 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5329 } else if (native_mode->clock == drm_mode->clock &&
5330 native_mode->htotal == drm_mode->htotal &&
5331 native_mode->vtotal == drm_mode->vtotal) {
5332 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5334 /* no scaling and no amdgpu-inserted mode, no need to patch */
5338 static struct dc_sink *
5339 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5341 struct dc_sink_init_data sink_init_data = { 0 };
5342 struct dc_sink *sink = NULL;
5343 sink_init_data.link = aconnector->dc_link;
5344 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5346 sink = dc_sink_create(&sink_init_data);
5348 DRM_ERROR("Failed to create sink!\n");
5351 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5356 static void set_multisync_trigger_params(
5357 struct dc_stream_state *stream)
5359 struct dc_stream_state *master = NULL;
5361 if (stream->triggered_crtc_reset.enabled) {
5362 master = stream->triggered_crtc_reset.event_source;
5363 stream->triggered_crtc_reset.event =
5364 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5365 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5366 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5370 static void set_master_stream(struct dc_stream_state *stream_set[],
5373 int j, highest_rfr = 0, master_stream = 0;
5375 for (j = 0; j < stream_count; j++) {
5376 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5377 int refresh_rate = 0;
5379 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5380 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
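/*
 * e.g. a 148.5 MHz stream (pix_clk_100hz == 1485000) with
 * 2200 x 1125 total pixels gives 148500000 / 2475000 = 60 Hz.
 */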
5381 if (refresh_rate > highest_rfr) {
5382 highest_rfr = refresh_rate;
5387 for (j = 0; j < stream_count; j++) {
5389 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5393 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5396 struct dc_stream_state *stream;
5398 if (context->stream_count < 2)
5400 for (i = 0; i < context->stream_count ; i++) {
5401 if (!context->streams[i])
5404 * TODO: add a function to read the AMD VSDB bits and set the
5405 * crtc_sync_master.multi_sync_enabled flag.
5406 * For now it's set to false.
5410 set_master_stream(context->streams, context->stream_count);
5412 for (i = 0; i < context->stream_count ; i++) {
5413 stream = context->streams[i];
5418 set_multisync_trigger_params(stream);
5423 * DOC: FreeSync Video
5425 * When a userspace application wants to play a video, the content follows a
5426 * standard format definition that usually specifies the FPS for that format.
5427 * The below list illustrates some video format and the expected FPS,
5430 * - TV/NTSC (23.976 FPS)
5433 * - TV/NTSC (29.97 FPS)
5434 * - TV/NTSC (30 FPS)
5435 * - Cinema HFR (48 FPS)
5437 * - Commonly used (60 FPS)
5438 * - Multiples of 24 (48,72,96 FPS)
5440 * The list of standard video formats is not huge and can be added to the
5441 * connector modeset list beforehand. With that, userspace can leverage
5442 * FreeSync to extend the front porch in order to attain the target refresh
5443 * rate. Such a switch will happen seamlessly, without screen blanking or
5444 * reprogramming of the output in any other way. If the userspace requests a
5445 * modesetting change compatible with FreeSync modes that only differ in the
5446 * refresh rate, DC will skip the full update and avoid blink during the
5447 * transition. For example, the video player can change the modesetting from
5448 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5449 * causing any display blink. This same concept can be applied to a mode
5450 * setting change.
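*
* As a worked example (made-up numbers): a 1920x1080 mode with
* htotal 2200, vtotal 1125 and a 148.5 MHz pixel clock refreshes at
* 148500000 / (2200 * 1125) = 60 Hz. Keeping the clock and htotal
* fixed while stretching the front porch so that vtotal becomes 2250
* yields 30 Hz, and vtotal 2812 gives roughly 24 Hz (24.004 Hz).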
5452 static struct drm_display_mode *
5453 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5454 bool use_probed_modes)
5456 struct drm_display_mode *m, *m_pref = NULL;
5457 u16 current_refresh, highest_refresh;
5458 struct list_head *list_head = use_probed_modes ?
5459 &aconnector->base.probed_modes :
5460 &aconnector->base.modes;
5462 if (aconnector->freesync_vid_base.clock != 0)
5463 return &aconnector->freesync_vid_base;
5465 /* Find the preferred mode */
5466 list_for_each_entry (m, list_head, head) {
5467 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5474 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5475 m_pref = list_first_entry_or_null(
5476 &aconnector->base.modes, struct drm_display_mode, head);
5478 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5483 highest_refresh = drm_mode_vrefresh(m_pref);
5486 * Find the mode with the highest refresh rate at the same resolution.
5487 * For some monitors, the preferred mode is not the mode with the
5488 * highest supported refresh rate.
5490 list_for_each_entry (m, list_head, head) {
5491 current_refresh = drm_mode_vrefresh(m);
5493 if (m->hdisplay == m_pref->hdisplay &&
5494 m->vdisplay == m_pref->vdisplay &&
5495 highest_refresh < current_refresh) {
5496 highest_refresh = current_refresh;
5501 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5505 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5506 struct amdgpu_dm_connector *aconnector)
5508 struct drm_display_mode *high_mode;
5511 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5512 if (!high_mode || !mode)
5515 timing_diff = high_mode->vtotal - mode->vtotal;
5517 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5518 high_mode->hdisplay != mode->hdisplay ||
5519 high_mode->vdisplay != mode->vdisplay ||
5520 high_mode->hsync_start != mode->hsync_start ||
5521 high_mode->hsync_end != mode->hsync_end ||
5522 high_mode->htotal != mode->htotal ||
5523 high_mode->hskew != mode->hskew ||
5524 high_mode->vscan != mode->vscan ||
5525 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5526 high_mode->vsync_end - mode->vsync_end != timing_diff)
5532 #if defined(CONFIG_DRM_AMD_DC_DCN)
5533 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5534 struct dc_sink *sink, struct dc_stream_state *stream,
5535 struct dsc_dec_dpcd_caps *dsc_caps)
5537 stream->timing.flags.DSC = 0;
5538 dsc_caps->is_dsc_supported = false;
5540 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5541 sink->sink_signal == SIGNAL_TYPE_EDP)) {
5542 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5543 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5544 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5545 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5546 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5552 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5553 struct dc_sink *sink, struct dc_stream_state *stream,
5554 struct dsc_dec_dpcd_caps *dsc_caps,
5555 uint32_t max_dsc_target_bpp_limit_override)
5557 const struct dc_link_settings *verified_link_cap = NULL;
5558 uint32_t link_bw_in_kbps;
5559 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
5560 struct dc *dc = sink->ctx->dc;
5561 struct dc_dsc_bw_range bw_range = {0};
5562 struct dc_dsc_config dsc_cfg = {0};
5564 verified_link_cap = dc_link_get_link_cap(stream->link);
5565 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5566 edp_min_bpp_x16 = 8 * 16;
5567 edp_max_bpp_x16 = 8 * 16;
5569 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5570 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5572 if (edp_max_bpp_x16 < edp_min_bpp_x16)
5573 edp_min_bpp_x16 = edp_max_bpp_x16;
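/*
 * Note: these bounds are in 1/16 bpp units, so 8 * 16 == 128 means
 * 8.0 bpp; the comparison above treats edp_max_bits_per_pixel in the
 * same units, so a panel advertising 96 (6.0 bpp) clamps both the max
 * and, via the second check, the min down to 6.0 bpp.
 */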
5575 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5576 dc->debug.dsc_min_slice_height_override,
5577 edp_min_bpp_x16, edp_max_bpp_x16,
5582 if (bw_range.max_kbps < link_bw_in_kbps) {
5583 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5585 dc->debug.dsc_min_slice_height_override,
5586 max_dsc_target_bpp_limit_override,
5590 stream->timing.dsc_cfg = dsc_cfg;
5591 stream->timing.flags.DSC = 1;
5592 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5598 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5600 dc->debug.dsc_min_slice_height_override,
5601 max_dsc_target_bpp_limit_override,
5605 stream->timing.dsc_cfg = dsc_cfg;
5606 stream->timing.flags.DSC = 1;
5611 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5612 struct dc_sink *sink, struct dc_stream_state *stream,
5613 struct dsc_dec_dpcd_caps *dsc_caps)
5615 struct drm_connector *drm_connector = &aconnector->base;
5616 uint32_t link_bandwidth_kbps;
5617 uint32_t max_dsc_target_bpp_limit_override = 0;
5618 struct dc *dc = sink->ctx->dc;
5619 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
5620 uint32_t dsc_max_supported_bw_in_kbps;
5622 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5623 dc_link_get_link_cap(aconnector->dc_link));
5624 if (stream->link && stream->link->local_sink)
5625 max_dsc_target_bpp_limit_override =
5626 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
5628 /* Set DSC policy according to dsc_clock_en */
5629 dc_dsc_policy_set_enable_dsc_when_not_needed(
5630 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5632 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
5633 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
5635 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
5637 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5638 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
5639 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5641 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5642 max_dsc_target_bpp_limit_override,
5643 link_bandwidth_kbps,
5645 &stream->timing.dsc_cfg)) {
5646 stream->timing.flags.DSC = 1;
5647 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5649 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
5650 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
5651 max_supported_bw_in_kbps = link_bandwidth_kbps;
5652 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
5654 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
5655 max_supported_bw_in_kbps > 0 &&
5656 dsc_max_supported_bw_in_kbps > 0)
5657 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5659 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5660 max_dsc_target_bpp_limit_override,
5661 dsc_max_supported_bw_in_kbps,
5663 &stream->timing.dsc_cfg)) {
5664 stream->timing.flags.DSC = 1;
5665 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
5666 __func__, drm_connector->name);
5671 /* Overwrite the stream flag if DSC is enabled through debugfs */
5672 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5673 stream->timing.flags.DSC = 1;
5675 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5676 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5678 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5679 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5681 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5682 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5684 #endif /* CONFIG_DRM_AMD_DC_DCN */
5686 static struct dc_stream_state *
5687 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5688 const struct drm_display_mode *drm_mode,
5689 const struct dm_connector_state *dm_state,
5690 const struct dc_stream_state *old_stream,
5693 struct drm_display_mode *preferred_mode = NULL;
5694 struct drm_connector *drm_connector;
5695 const struct drm_connector_state *con_state =
5696 dm_state ? &dm_state->base : NULL;
5697 struct dc_stream_state *stream = NULL;
5698 struct drm_display_mode mode = *drm_mode;
5699 struct drm_display_mode saved_mode;
5700 struct drm_display_mode *freesync_mode = NULL;
5701 bool native_mode_found = false;
5702 bool recalculate_timing = false;
5703 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5705 int preferred_refresh = 0;
5706 #if defined(CONFIG_DRM_AMD_DC_DCN)
5707 struct dsc_dec_dpcd_caps dsc_caps;
5710 struct dc_sink *sink = NULL;
5712 memset(&saved_mode, 0, sizeof(saved_mode));
5714 if (aconnector == NULL) {
5715 DRM_ERROR("aconnector is NULL!\n");
5719 drm_connector = &aconnector->base;
5721 if (!aconnector->dc_sink) {
5722 sink = create_fake_sink(aconnector);
5726 sink = aconnector->dc_sink;
5727 dc_sink_retain(sink);
5730 stream = dc_create_stream_for_sink(sink);
5732 if (stream == NULL) {
5733 DRM_ERROR("Failed to create stream for sink!\n");
5737 stream->dm_stream_context = aconnector;
5739 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5740 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5742 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5743 /* Search for preferred mode */
5744 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5745 native_mode_found = true;
5749 if (!native_mode_found)
5750 preferred_mode = list_first_entry_or_null(
5751 &aconnector->base.modes,
5752 struct drm_display_mode,
5755 mode_refresh = drm_mode_vrefresh(&mode);
5757 if (preferred_mode == NULL) {
5759 * This may not be an error: the use case is when we have no
5760 * usermode calls to reset and set the mode upon hotplug. In this
5761 * case, we call set mode ourselves to restore the previous mode,
5762 * and the mode list may not be filled in yet.
5764 DRM_DEBUG_DRIVER("No preferred mode found\n");
5766 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
5767 if (recalculate_timing) {
5768 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5769 drm_mode_copy(&saved_mode, &mode);
5770 drm_mode_copy(&mode, freesync_mode);
5772 decide_crtc_timing_for_drm_display_mode(
5773 &mode, preferred_mode, scale);
5775 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5779 if (recalculate_timing)
5780 drm_mode_set_crtcinfo(&saved_mode, 0);
5782 drm_mode_set_crtcinfo(&mode, 0);
5785 * If scaling is enabled and refresh rate didn't change
5786 * we copy the vic and polarities of the old timings
5788 if (!scale || mode_refresh != preferred_refresh)
5789 fill_stream_properties_from_drm_display_mode(
5790 stream, &mode, &aconnector->base, con_state, NULL,
5793 fill_stream_properties_from_drm_display_mode(
5794 stream, &mode, &aconnector->base, con_state, old_stream,
5797 #if defined(CONFIG_DRM_AMD_DC_DCN)
5798 /* SST DSC determination policy */
5799 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5800 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5801 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5804 update_stream_scaling_settings(&mode, dm_state, stream);
5807 &stream->audio_info,
5811 update_stream_signal(stream, sink);
5813 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5814 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5816 if (stream->link->psr_settings.psr_feature_enabled) {
5818 // Decide whether the stream supports VSC SDP colorimetry
5819 // before building the VSC info packet.
5821 stream->use_vsc_sdp_for_colorimetry = false;
5822 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5823 stream->use_vsc_sdp_for_colorimetry =
5824 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5826 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5827 stream->use_vsc_sdp_for_colorimetry = true;
5829 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
5830 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5834 dc_sink_release(sink);
5839 static enum drm_connector_status
5840 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5843 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5847 * 1. This interface is NOT called in the context of the HPD IRQ.
5848 * 2. This interface *is* called in the context of a user-mode ioctl,
5849 * which makes it a bad place for *any* MST-related activity.
5852 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5853 !aconnector->fake_enable)
5854 connected = (aconnector->dc_sink != NULL);
5856 connected = (aconnector->base.force == DRM_FORCE_ON ||
5857 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
5859 update_subconnector_property(aconnector);
5861 return (connected ? connector_status_connected :
5862 connector_status_disconnected);
5865 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5866 struct drm_connector_state *connector_state,
5867 struct drm_property *property,
5870 struct drm_device *dev = connector->dev;
5871 struct amdgpu_device *adev = drm_to_adev(dev);
5872 struct dm_connector_state *dm_old_state =
5873 to_dm_connector_state(connector->state);
5874 struct dm_connector_state *dm_new_state =
5875 to_dm_connector_state(connector_state);
5879 if (property == dev->mode_config.scaling_mode_property) {
5880 enum amdgpu_rmx_type rmx_type;
5883 case DRM_MODE_SCALE_CENTER:
5884 rmx_type = RMX_CENTER;
5886 case DRM_MODE_SCALE_ASPECT:
5887 rmx_type = RMX_ASPECT;
5889 case DRM_MODE_SCALE_FULLSCREEN:
5890 rmx_type = RMX_FULL;
5892 case DRM_MODE_SCALE_NONE:
5898 if (dm_old_state->scaling == rmx_type)
5901 dm_new_state->scaling = rmx_type;
5903 } else if (property == adev->mode_info.underscan_hborder_property) {
5904 dm_new_state->underscan_hborder = val;
5906 } else if (property == adev->mode_info.underscan_vborder_property) {
5907 dm_new_state->underscan_vborder = val;
5909 } else if (property == adev->mode_info.underscan_property) {
5910 dm_new_state->underscan_enable = val;
5912 } else if (property == adev->mode_info.abm_level_property) {
5913 dm_new_state->abm_level = val;
5920 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5921 const struct drm_connector_state *state,
5922 struct drm_property *property,
5925 struct drm_device *dev = connector->dev;
5926 struct amdgpu_device *adev = drm_to_adev(dev);
5927 struct dm_connector_state *dm_state =
5928 to_dm_connector_state(state);
5931 if (property == dev->mode_config.scaling_mode_property) {
5932 switch (dm_state->scaling) {
5934 *val = DRM_MODE_SCALE_CENTER;
5937 *val = DRM_MODE_SCALE_ASPECT;
5940 *val = DRM_MODE_SCALE_FULLSCREEN;
5944 *val = DRM_MODE_SCALE_NONE;
5948 } else if (property == adev->mode_info.underscan_hborder_property) {
5949 *val = dm_state->underscan_hborder;
5951 } else if (property == adev->mode_info.underscan_vborder_property) {
5952 *val = dm_state->underscan_vborder;
5954 } else if (property == adev->mode_info.underscan_property) {
5955 *val = dm_state->underscan_enable;
5957 } else if (property == adev->mode_info.abm_level_property) {
5958 *val = dm_state->abm_level;
5965 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5967 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5969 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5972 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5974 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5975 const struct dc_link *link = aconnector->dc_link;
5976 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5977 struct amdgpu_display_manager *dm = &adev->dm;
5981 * Call only if mst_mgr was initialized before since it's not done
5982 * for all connector types.
5984 if (aconnector->mst_mgr.dev)
5985 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5987 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5988 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5989 for (i = 0; i < dm->num_of_edps; i++) {
5990 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
5991 backlight_device_unregister(dm->backlight_dev[i]);
5992 dm->backlight_dev[i] = NULL;
5997 if (aconnector->dc_em_sink)
5998 dc_sink_release(aconnector->dc_em_sink);
5999 aconnector->dc_em_sink = NULL;
6000 if (aconnector->dc_sink)
6001 dc_sink_release(aconnector->dc_sink);
6002 aconnector->dc_sink = NULL;
6004 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6005 drm_connector_unregister(connector);
6006 drm_connector_cleanup(connector);
6007 if (aconnector->i2c) {
6008 i2c_del_adapter(&aconnector->i2c->base);
6009 kfree(aconnector->i2c);
6011 kfree(aconnector->dm_dp_aux.aux.name);
6016 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6018 struct dm_connector_state *state =
6019 to_dm_connector_state(connector->state);
6021 if (connector->state)
6022 __drm_atomic_helper_connector_destroy_state(connector->state);
6026 state = kzalloc(sizeof(*state), GFP_KERNEL);
6029 state->scaling = RMX_OFF;
6030 state->underscan_enable = false;
6031 state->underscan_hborder = 0;
6032 state->underscan_vborder = 0;
6033 state->base.max_requested_bpc = 8;
6034 state->vcpi_slots = 0;
6037 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6038 state->abm_level = amdgpu_dm_abm_level;
6040 __drm_atomic_helper_connector_reset(connector, &state->base);
6044 struct drm_connector_state *
6045 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6047 struct dm_connector_state *state =
6048 to_dm_connector_state(connector->state);
6050 struct dm_connector_state *new_state =
6051 kmemdup(state, sizeof(*state), GFP_KERNEL);
6056 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6058 new_state->freesync_capable = state->freesync_capable;
6059 new_state->abm_level = state->abm_level;
6060 new_state->scaling = state->scaling;
6061 new_state->underscan_enable = state->underscan_enable;
6062 new_state->underscan_hborder = state->underscan_hborder;
6063 new_state->underscan_vborder = state->underscan_vborder;
6064 new_state->vcpi_slots = state->vcpi_slots;
6065 new_state->pbn = state->pbn;
6066 return &new_state->base;
6070 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6072 struct amdgpu_dm_connector *amdgpu_dm_connector =
6073 to_amdgpu_dm_connector(connector);
6076 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6077 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6078 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6079 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6084 #if defined(CONFIG_DEBUG_FS)
6085 connector_debugfs_init(amdgpu_dm_connector);
6091 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6092 .reset = amdgpu_dm_connector_funcs_reset,
6093 .detect = amdgpu_dm_connector_detect,
6094 .fill_modes = drm_helper_probe_single_connector_modes,
6095 .destroy = amdgpu_dm_connector_destroy,
6096 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6097 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6098 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6099 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6100 .late_register = amdgpu_dm_connector_late_register,
6101 .early_unregister = amdgpu_dm_connector_unregister
6104 static int get_modes(struct drm_connector *connector)
6106 return amdgpu_dm_connector_get_modes(connector);
6109 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6111 struct dc_sink_init_data init_params = {
6112 .link = aconnector->dc_link,
6113 .sink_signal = SIGNAL_TYPE_VIRTUAL
6117 if (!aconnector->base.edid_blob_ptr) {
6118 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6119 aconnector->base.name);
6121 aconnector->base.force = DRM_FORCE_OFF;
6122 aconnector->base.override_edid = false;
6126 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6128 aconnector->edid = edid;
6130 aconnector->dc_em_sink = dc_link_add_remote_sink(
6131 aconnector->dc_link,
6133 (edid->extensions + 1) * EDID_LENGTH,
6136 if (aconnector->base.force == DRM_FORCE_ON) {
6137 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6138 aconnector->dc_link->local_sink :
6139 aconnector->dc_em_sink;
6140 dc_sink_retain(aconnector->dc_sink);
6144 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6146 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6149 	 * In case of a headless boot with force on for a DP managed connector,
6150 	 * these settings have to be != 0 to get an initial modeset.
6152 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6153 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6154 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6158 aconnector->base.override_edid = true;
6159 create_eml_sink(aconnector);
6162 struct dc_stream_state *
6163 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6164 const struct drm_display_mode *drm_mode,
6165 const struct dm_connector_state *dm_state,
6166 const struct dc_stream_state *old_stream)
6168 struct drm_connector *connector = &aconnector->base;
6169 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6170 struct dc_stream_state *stream;
6171 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6172 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6173 enum dc_status dc_result = DC_OK;
6176 stream = create_stream_for_sink(aconnector, drm_mode,
6177 dm_state, old_stream,
6179 if (stream == NULL) {
6180 DRM_ERROR("Failed to create stream for sink!\n");
6184 dc_result = dc_validate_stream(adev->dm.dc, stream);
6185 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
6186 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6188 if (dc_result != DC_OK) {
6189 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6194 dc_status_to_str(dc_result));
6196 dc_stream_release(stream);
6198 requested_bpc -= 2; /* lower bpc to retry validation */
6201 } while (stream == NULL && requested_bpc >= 6);
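	/*
	 * Example of the retry ladder above (illustrative numbers): with
	 * max_requested_bpc = 10, a mode that fails DC validation is retried
	 * at 8 bpc and then at 6 bpc before giving up, since each failed pass
	 * lowers requested_bpc by 2 and the loop stops below 6 bpc.
	 */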
6203 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6204 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6206 aconnector->force_yuv420_output = true;
6207 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6208 dm_state, old_stream);
6209 aconnector->force_yuv420_output = false;
6215 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6216 struct drm_display_mode *mode)
6218 int result = MODE_ERROR;
6219 struct dc_sink *dc_sink;
6220 /* TODO: Unhardcode stream count */
6221 struct dc_stream_state *stream;
6222 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6224 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6225 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6229 	 * Only run this the first time mode_valid is called, to initialize
6230 	 * EDID management.
6232 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6233 !aconnector->dc_em_sink)
6234 handle_edid_mgmt(aconnector);
6236 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6238 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6239 aconnector->base.force != DRM_FORCE_ON) {
6240 DRM_ERROR("dc_sink is NULL!\n");
6244 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6246 dc_stream_release(stream);
6251 	/* TODO: error handling */
6255 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6256 struct dc_info_packet *out)
6258 struct hdmi_drm_infoframe frame;
6259 unsigned char buf[30]; /* 26 + 4 */
6263 memset(out, 0, sizeof(*out));
6265 if (!state->hdr_output_metadata)
6268 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6272 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6276 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6280 /* Prepare the infopacket for DC. */
6281 switch (state->connector->connector_type) {
6282 case DRM_MODE_CONNECTOR_HDMIA:
6283 out->hb0 = 0x87; /* type */
6284 out->hb1 = 0x01; /* version */
6285 out->hb2 = 0x1A; /* length */
6286 out->sb[0] = buf[3]; /* checksum */
6290 case DRM_MODE_CONNECTOR_DisplayPort:
6291 case DRM_MODE_CONNECTOR_eDP:
6292 out->hb0 = 0x00; /* sdp id, zero */
6293 out->hb1 = 0x87; /* type */
6294 out->hb2 = 0x1D; /* payload len - 1 */
6295 out->hb3 = (0x13 << 2); /* sdp version */
6296 out->sb[0] = 0x01; /* version */
6297 out->sb[1] = 0x1A; /* length */
6305 memcpy(&out->sb[i], &buf[4], 26);
6308 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6309 sizeof(out->sb), false);
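	/*
	 * For reference, the packet layouts produced by the switch above:
	 *
	 *   HDMI DRM infoframe: hb0 = 0x87 (type), hb1 = 0x01 (version),
	 *                       hb2 = 0x1A (length = 26), sb[0] = checksum,
	 *                       static metadata payload in sb[1..26].
	 *
	 *   DP/eDP SDP:         hb0 = 0x00 (SDP id), hb1 = 0x87 (type),
	 *                       hb2 = 0x1D (payload length - 1),
	 *                       hb3 = 0x13 << 2 (SDP version),
	 *                       sb[0] = 0x01 (version), sb[1] = 0x1A (length),
	 *                       same 26-byte payload in sb[2..27].
	 */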
6315 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6316 struct drm_atomic_state *state)
6318 struct drm_connector_state *new_con_state =
6319 drm_atomic_get_new_connector_state(state, conn);
6320 struct drm_connector_state *old_con_state =
6321 drm_atomic_get_old_connector_state(state, conn);
6322 struct drm_crtc *crtc = new_con_state->crtc;
6323 struct drm_crtc_state *new_crtc_state;
6326 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6331 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6332 struct dc_info_packet hdr_infopacket;
6334 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6338 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6339 if (IS_ERR(new_crtc_state))
6340 return PTR_ERR(new_crtc_state);
6343 * DC considers the stream backends changed if the
6344 * static metadata changes. Forcing the modeset also
6345 * gives a simple way for userspace to switch from
6346 * 8bpc to 10bpc when setting the metadata to enter
6349 * Changing the static metadata after it's been
6350 * set is permissible, however. So only force a
6351 * modeset if we're entering or exiting HDR.
6353 new_crtc_state->mode_changed =
6354 !old_con_state->hdr_output_metadata ||
6355 !new_con_state->hdr_output_metadata;
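		/*
		 * Concretely: metadata NULL -> set (entering HDR) or
		 * set -> NULL (exiting HDR) forces a full modeset, while
		 * set -> set (e.g. only the mastering luminance changed)
		 * just updates the infopacket on the existing stream.
		 */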
6361 static const struct drm_connector_helper_funcs
6362 amdgpu_dm_connector_helper_funcs = {
6364 	 * If hotplugging a second, bigger display in FB console mode, the bigger
6365 	 * resolution modes will be filtered by drm_mode_validate_size(), and those
6366 	 * modes will be missing after the user starts lightdm. So we need to renew
6367 	 * the modes list in the get_modes callback, not just return the modes count.
6369 .get_modes = get_modes,
6370 .mode_valid = amdgpu_dm_connector_mode_valid,
6371 .atomic_check = amdgpu_dm_connector_atomic_check,
6374 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6379 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6381 	switch (display_color_depth) {
6382 	case COLOR_DEPTH_666:
6383 		return 6;
6384 	case COLOR_DEPTH_888:
6385 		return 8;
6386 	case COLOR_DEPTH_101010:
6387 		return 10;
6388 	case COLOR_DEPTH_121212:
6389 		return 12;
6390 	case COLOR_DEPTH_141414:
6391 		return 14;
6392 	case COLOR_DEPTH_161616:
6393 		return 16;
6394 	default:
6395 		break;
6396 	}
6397 	return 0;
6400 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6401 struct drm_crtc_state *crtc_state,
6402 struct drm_connector_state *conn_state)
6404 struct drm_atomic_state *state = crtc_state->state;
6405 struct drm_connector *connector = conn_state->connector;
6406 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6407 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6408 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6409 struct drm_dp_mst_topology_mgr *mst_mgr;
6410 struct drm_dp_mst_port *mst_port;
6411 enum dc_color_depth color_depth;
6413 bool is_y420 = false;
6415 if (!aconnector->port || !aconnector->dc_sink)
6418 mst_port = aconnector->port;
6419 mst_mgr = &aconnector->mst_port->mst_mgr;
6421 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6424 if (!state->duplicated) {
6425 int max_bpc = conn_state->max_requested_bpc;
6426 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6427 aconnector->force_yuv420_output;
6428 color_depth = convert_color_depth_from_display_info(connector,
6431 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6432 clock = adjusted_mode->clock;
6433 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
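		/*
		 * Worked example with illustrative numbers: a 1920x1080@60
		 * mode has a 148500 kHz pixel clock, so at 24 bpp
		 * drm_dp_calc_pbn_mode() yields roughly
		 *
		 *   PBN = ceil(148500 * 24 * 64 * 1006 / (8 * 54 * 10^6)) = 532,
		 *
		 * where one PBN unit corresponds to 54/64 MBps and 1006/1000
		 * is the DP spec's ~0.6% bandwidth margin.
		 */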
6435 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6438 dm_new_connector_state->pbn,
6439 dm_mst_get_pbn_divider(aconnector->dc_link));
6440 if (dm_new_connector_state->vcpi_slots < 0) {
6441 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6442 return dm_new_connector_state->vcpi_slots;
6447 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6448 .disable = dm_encoder_helper_disable,
6449 .atomic_check = dm_encoder_helper_atomic_check
6452 #if defined(CONFIG_DRM_AMD_DC_DCN)
6453 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6454 struct dc_state *dc_state,
6455 struct dsc_mst_fairness_vars *vars)
6457 struct dc_stream_state *stream = NULL;
6458 struct drm_connector *connector;
6459 struct drm_connector_state *new_con_state;
6460 struct amdgpu_dm_connector *aconnector;
6461 struct dm_connector_state *dm_conn_state;
6463 int vcpi, pbn_div, pbn, slot_num = 0;
6465 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6467 aconnector = to_amdgpu_dm_connector(connector);
6469 if (!aconnector->port)
6472 if (!new_con_state || !new_con_state->crtc)
6475 dm_conn_state = to_dm_connector_state(new_con_state);
6477 for (j = 0; j < dc_state->stream_count; j++) {
6478 stream = dc_state->streams[j];
6482 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6491 pbn_div = dm_mst_get_pbn_divider(stream->link);
6492 /* pbn is calculated by compute_mst_dsc_configs_for_state*/
6493 for (j = 0; j < dc_state->stream_count; j++) {
6494 if (vars[j].aconnector == aconnector) {
6500 if (j == dc_state->stream_count)
6503 slot_num = DIV_ROUND_UP(pbn, pbn_div);
6505 if (stream->timing.flags.DSC != 1) {
6506 dm_conn_state->pbn = pbn;
6507 dm_conn_state->vcpi_slots = slot_num;
6509 drm_dp_mst_atomic_enable_dsc(state,
6517 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6524 dm_conn_state->pbn = pbn;
6525 dm_conn_state->vcpi_slots = vcpi;
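		/*
		 * Net effect of the loop above: streams without DSC keep the
		 * plain DIV_ROUND_UP(pbn, pbn_div) slot count and have DSC
		 * explicitly disabled on their port, while DSC streams take
		 * their VCPI slot count from drm_dp_mst_atomic_enable_dsc(),
		 * which accounts for the compressed PBN.
		 */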
6531 static int to_drm_connector_type(enum signal_type st)
6534 case SIGNAL_TYPE_HDMI_TYPE_A:
6535 return DRM_MODE_CONNECTOR_HDMIA;
6536 case SIGNAL_TYPE_EDP:
6537 return DRM_MODE_CONNECTOR_eDP;
6538 case SIGNAL_TYPE_LVDS:
6539 return DRM_MODE_CONNECTOR_LVDS;
6540 case SIGNAL_TYPE_RGB:
6541 return DRM_MODE_CONNECTOR_VGA;
6542 case SIGNAL_TYPE_DISPLAY_PORT:
6543 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6544 return DRM_MODE_CONNECTOR_DisplayPort;
6545 case SIGNAL_TYPE_DVI_DUAL_LINK:
6546 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6547 return DRM_MODE_CONNECTOR_DVID;
6548 case SIGNAL_TYPE_VIRTUAL:
6549 return DRM_MODE_CONNECTOR_VIRTUAL;
6552 return DRM_MODE_CONNECTOR_Unknown;
6556 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6558 struct drm_encoder *encoder;
6560 /* There is only one encoder per connector */
6561 drm_connector_for_each_possible_encoder(connector, encoder)
6567 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6569 struct drm_encoder *encoder;
6570 struct amdgpu_encoder *amdgpu_encoder;
6572 encoder = amdgpu_dm_connector_to_encoder(connector);
6574 if (encoder == NULL)
6577 amdgpu_encoder = to_amdgpu_encoder(encoder);
6579 amdgpu_encoder->native_mode.clock = 0;
6581 if (!list_empty(&connector->probed_modes)) {
6582 struct drm_display_mode *preferred_mode = NULL;
6584 list_for_each_entry(preferred_mode,
6585 &connector->probed_modes,
6587 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6588 amdgpu_encoder->native_mode = *preferred_mode;
6596 static struct drm_display_mode *
6597 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6599 int hdisplay, int vdisplay)
6601 struct drm_device *dev = encoder->dev;
6602 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6603 struct drm_display_mode *mode = NULL;
6604 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6606 mode = drm_mode_duplicate(dev, native_mode);
6611 mode->hdisplay = hdisplay;
6612 mode->vdisplay = vdisplay;
6613 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6614 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6620 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6621 struct drm_connector *connector)
6623 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6624 struct drm_display_mode *mode = NULL;
6625 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6626 struct amdgpu_dm_connector *amdgpu_dm_connector =
6627 to_amdgpu_dm_connector(connector);
6631 char name[DRM_DISPLAY_MODE_LEN];
6634 } common_modes[] = {
6635 { "640x480", 640, 480},
6636 { "800x600", 800, 600},
6637 { "1024x768", 1024, 768},
6638 { "1280x720", 1280, 720},
6639 { "1280x800", 1280, 800},
6640 {"1280x1024", 1280, 1024},
6641 { "1440x900", 1440, 900},
6642 {"1680x1050", 1680, 1050},
6643 {"1600x1200", 1600, 1200},
6644 {"1920x1080", 1920, 1080},
6645 {"1920x1200", 1920, 1200}
6648 n = ARRAY_SIZE(common_modes);
6650 for (i = 0; i < n; i++) {
6651 struct drm_display_mode *curmode = NULL;
6652 bool mode_existed = false;
6654 if (common_modes[i].w > native_mode->hdisplay ||
6655 common_modes[i].h > native_mode->vdisplay ||
6656 (common_modes[i].w == native_mode->hdisplay &&
6657 common_modes[i].h == native_mode->vdisplay))
6660 list_for_each_entry(curmode, &connector->probed_modes, head) {
6661 if (common_modes[i].w == curmode->hdisplay &&
6662 common_modes[i].h == curmode->vdisplay) {
6663 mode_existed = true;
6671 mode = amdgpu_dm_create_common_mode(encoder,
6672 common_modes[i].name, common_modes[i].w,
6677 drm_mode_probed_add(connector, mode);
6678 amdgpu_dm_connector->num_modes++;
6682 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
6684 struct drm_encoder *encoder;
6685 struct amdgpu_encoder *amdgpu_encoder;
6686 const struct drm_display_mode *native_mode;
6688 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
6689 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
6692 mutex_lock(&connector->dev->mode_config.mutex);
6693 amdgpu_dm_connector_get_modes(connector);
6694 mutex_unlock(&connector->dev->mode_config.mutex);
6696 encoder = amdgpu_dm_connector_to_encoder(connector);
6700 amdgpu_encoder = to_amdgpu_encoder(encoder);
6702 native_mode = &amdgpu_encoder->native_mode;
6703 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
6706 drm_connector_set_panel_orientation_with_quirk(connector,
6707 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
6708 native_mode->hdisplay,
6709 native_mode->vdisplay);
6712 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6715 struct amdgpu_dm_connector *amdgpu_dm_connector =
6716 to_amdgpu_dm_connector(connector);
6719 /* empty probed_modes */
6720 INIT_LIST_HEAD(&connector->probed_modes);
6721 amdgpu_dm_connector->num_modes =
6722 drm_add_edid_modes(connector, edid);
6724 	/* Sort the probed modes before calling
6725 	 * amdgpu_dm_get_native_mode(), since an EDID can have
6726 	 * more than one preferred mode. Modes that appear
6727 	 * later in the probed mode list could be of a higher,
6728 	 * preferred resolution. For example, 3840x2160 as the
6729 	 * base EDID preferred timing and a 4096x2160 preferred
6730 	 * resolution in a DID extension block later.
6732 drm_mode_sort(&connector->probed_modes);
6733 amdgpu_dm_get_native_mode(connector);
6735 /* Freesync capabilities are reset by calling
6736 	 * drm_add_edid_modes() and need to be
6737 	 * restored here.
6738 	 */
6739 	amdgpu_dm_update_freesync_caps(connector, edid);
6741 amdgpu_dm_connector->num_modes = 0;
6745 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
6746 struct drm_display_mode *mode)
6748 struct drm_display_mode *m;
6750 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
6751 if (drm_mode_equal(m, mode))
6758 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
6760 const struct drm_display_mode *m;
6761 struct drm_display_mode *new_mode;
6763 uint32_t new_modes_count = 0;
6765 /* Standard FPS values
6774 * 60 - Commonly used
6775 * 48,72,96,120 - Multiples of 24
6777 static const uint32_t common_rates[] = {
6778 23976, 24000, 25000, 29970, 30000,
6779 48000, 50000, 60000, 72000, 96000, 120000
6783 * Find mode with highest refresh rate with the same resolution
6784 * as the preferred mode. Some monitors report a preferred mode
6785 * with lower resolution than the highest refresh rate supported.
6788 m = get_highest_refresh_rate_mode(aconnector, true);
6792 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
6793 uint64_t target_vtotal, target_vtotal_diff;
6796 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
6799 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
6800 common_rates[i] > aconnector->max_vfreq * 1000)
6803 num = (unsigned long long)m->clock * 1000 * 1000;
6804 den = common_rates[i] * (unsigned long long)m->htotal;
6805 target_vtotal = div_u64(num, den);
6806 target_vtotal_diff = target_vtotal - m->vtotal;
6808 /* Check for illegal modes */
6809 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
6810 m->vsync_end + target_vtotal_diff < m->vsync_start ||
6811 m->vtotal + target_vtotal_diff < m->vsync_end)
6814 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
6818 new_mode->vtotal += (u16)target_vtotal_diff;
6819 new_mode->vsync_start += (u16)target_vtotal_diff;
6820 new_mode->vsync_end += (u16)target_vtotal_diff;
6821 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6822 new_mode->type |= DRM_MODE_TYPE_DRIVER;
6824 if (!is_duplicate_mode(aconnector, new_mode)) {
6825 drm_mode_probed_add(&aconnector->base, new_mode);
6826 new_modes_count += 1;
6828 drm_mode_destroy(aconnector->base.dev, new_mode);
6831 return new_modes_count;
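/*
 * Worked example (illustration only, not driver code): the vtotal stretch
 * computed in the loop above, for a 1920x1080@60 base mode with a
 * 148500 kHz pixel clock, htotal = 2200 and vtotal = 1125, targeting 48 Hz:
 *
 *   target_vtotal = clock * 1000 * 1000 / (rate_mHz * htotal)
 *                 = 148500000000 / (48000 * 2200) = 1406 (truncated)
 *
 * so the vertical front porch grows by 281 lines while the pixel clock and
 * horizontal timing stay untouched, yielding the same mode at a lower
 * fixed refresh rate.
 */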
6834 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
6837 struct amdgpu_dm_connector *amdgpu_dm_connector =
6838 to_amdgpu_dm_connector(connector);
6843 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
6844 amdgpu_dm_connector->num_modes +=
6845 add_fs_modes(amdgpu_dm_connector);
6848 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6850 struct amdgpu_dm_connector *amdgpu_dm_connector =
6851 to_amdgpu_dm_connector(connector);
6852 struct drm_encoder *encoder;
6853 struct edid *edid = amdgpu_dm_connector->edid;
6855 encoder = amdgpu_dm_connector_to_encoder(connector);
6857 if (!drm_edid_is_valid(edid)) {
6858 amdgpu_dm_connector->num_modes =
6859 drm_add_modes_noedid(connector, 640, 480);
6861 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6862 amdgpu_dm_connector_add_common_modes(encoder, connector);
6863 amdgpu_dm_connector_add_freesync_modes(connector, edid);
6865 amdgpu_dm_fbc_init(connector);
6867 return amdgpu_dm_connector->num_modes;
6870 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6871 struct amdgpu_dm_connector *aconnector,
6873 struct dc_link *link,
6876 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6879 * Some of the properties below require access to state, like bpc.
6880 * Allocate some default initial connector state with our reset helper.
6882 if (aconnector->base.funcs->reset)
6883 aconnector->base.funcs->reset(&aconnector->base);
6885 aconnector->connector_id = link_index;
6886 aconnector->dc_link = link;
6887 aconnector->base.interlace_allowed = false;
6888 aconnector->base.doublescan_allowed = false;
6889 aconnector->base.stereo_allowed = false;
6890 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6891 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6892 aconnector->audio_inst = -1;
6893 mutex_init(&aconnector->hpd_lock);
6896 	 * Configure HPD hot plug support: connector->polled's default value is 0,
6897 	 * which means HPD hot plug is not supported.
6899 switch (connector_type) {
6900 case DRM_MODE_CONNECTOR_HDMIA:
6901 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6902 aconnector->base.ycbcr_420_allowed =
6903 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6905 case DRM_MODE_CONNECTOR_DisplayPort:
6906 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6907 link->link_enc = link_enc_cfg_get_link_enc(link);
6908 ASSERT(link->link_enc);
6910 aconnector->base.ycbcr_420_allowed =
6911 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6913 case DRM_MODE_CONNECTOR_DVID:
6914 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6920 drm_object_attach_property(&aconnector->base.base,
6921 dm->ddev->mode_config.scaling_mode_property,
6922 DRM_MODE_SCALE_NONE);
6924 drm_object_attach_property(&aconnector->base.base,
6925 adev->mode_info.underscan_property,
6927 drm_object_attach_property(&aconnector->base.base,
6928 adev->mode_info.underscan_hborder_property,
6930 drm_object_attach_property(&aconnector->base.base,
6931 adev->mode_info.underscan_vborder_property,
6934 if (!aconnector->mst_port)
6935 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6937 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6938 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6939 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6941 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6942 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6943 drm_object_attach_property(&aconnector->base.base,
6944 adev->mode_info.abm_level_property, 0);
6947 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6948 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6949 connector_type == DRM_MODE_CONNECTOR_eDP) {
6950 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
6952 if (!aconnector->mst_port)
6953 drm_connector_attach_vrr_capable_property(&aconnector->base);
6955 #ifdef CONFIG_DRM_AMD_DC_HDCP
6956 if (adev->dm.hdcp_workqueue)
6957 drm_connector_attach_content_protection_property(&aconnector->base, true);
6962 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6963 struct i2c_msg *msgs, int num)
6965 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6966 struct ddc_service *ddc_service = i2c->ddc_service;
6967 struct i2c_command cmd;
6971 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6976 cmd.number_of_payloads = num;
6977 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6980 for (i = 0; i < num; i++) {
6981 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6982 cmd.payloads[i].address = msgs[i].addr;
6983 cmd.payloads[i].length = msgs[i].len;
6984 cmd.payloads[i].data = msgs[i].buf;
6988 ddc_service->ctx->dc,
6989 ddc_service->link->link_index,
6993 kfree(cmd.payloads);
6997 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6999 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7002 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7003 .master_xfer = amdgpu_dm_i2c_xfer,
7004 .functionality = amdgpu_dm_i2c_func,
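/*
 * Illustration (hypothetical transaction, not driver code): a userspace
 * EDID read over this adapter arrives as two i2c_msgs - a one-byte write
 * of the 0x00 offset to address 0x50, then a 128-byte read from 0x50.
 * The xfer hook above maps them 1:1 onto i2c_payloads, roughly:
 *
 *   payload[0] = { .write = true,  .address = 0x50, .length = 1,   ... }
 *   payload[1] = { .write = false, .address = 0x50, .length = 128, ... }
 *
 * and submits them as a single i2c_command over the link's DDC channel.
 */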
7007 static struct amdgpu_i2c_adapter *
7008 create_i2c(struct ddc_service *ddc_service,
7012 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7013 struct amdgpu_i2c_adapter *i2c;
7015 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7018 i2c->base.owner = THIS_MODULE;
7019 i2c->base.class = I2C_CLASS_DDC;
7020 i2c->base.dev.parent = &adev->pdev->dev;
7021 i2c->base.algo = &amdgpu_dm_i2c_algo;
7022 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7023 i2c_set_adapdata(&i2c->base, i2c);
7024 i2c->ddc_service = ddc_service;
7031 * Note: this function assumes that dc_link_detect() was called for the
7032 * dc_link which will be represented by this aconnector.
7034 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7035 struct amdgpu_dm_connector *aconnector,
7036 uint32_t link_index,
7037 struct amdgpu_encoder *aencoder)
7041 struct dc *dc = dm->dc;
7042 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7043 struct amdgpu_i2c_adapter *i2c;
7045 link->priv = aconnector;
7047 DRM_DEBUG_DRIVER("%s()\n", __func__);
7049 i2c = create_i2c(link->ddc, link->link_index, &res);
7051 DRM_ERROR("Failed to create i2c adapter data\n");
7055 aconnector->i2c = i2c;
7056 res = i2c_add_adapter(&i2c->base);
7059 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7063 connector_type = to_drm_connector_type(link->connector_signal);
7065 res = drm_connector_init_with_ddc(
7068 &amdgpu_dm_connector_funcs,
7073 DRM_ERROR("connector_init failed\n");
7074 aconnector->connector_id = -1;
7078 drm_connector_helper_add(
7080 &amdgpu_dm_connector_helper_funcs);
7082 amdgpu_dm_connector_init_helper(
7089 drm_connector_attach_encoder(
7090 &aconnector->base, &aencoder->base);
7092 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7093 || connector_type == DRM_MODE_CONNECTOR_eDP)
7094 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7099 aconnector->i2c = NULL;
7104 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7106 switch (adev->mode_info.num_crtc) {
7123 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7124 struct amdgpu_encoder *aencoder,
7125 uint32_t link_index)
7127 struct amdgpu_device *adev = drm_to_adev(dev);
7129 int res = drm_encoder_init(dev,
7131 &amdgpu_dm_encoder_funcs,
7132 DRM_MODE_ENCODER_TMDS,
7135 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7138 aencoder->encoder_id = link_index;
7140 aencoder->encoder_id = -1;
7142 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7147 static void manage_dm_interrupts(struct amdgpu_device *adev,
7148 struct amdgpu_crtc *acrtc,
7152 * We have no guarantee that the frontend index maps to the same
7153 * backend index - some even map to more than one.
7155 * TODO: Use a different interrupt or check DC itself for the mapping.
7158 amdgpu_display_crtc_idx_to_irq_type(
7163 drm_crtc_vblank_on(&acrtc->base);
7166 &adev->pageflip_irq,
7168 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7175 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7183 &adev->pageflip_irq,
7185 drm_crtc_vblank_off(&acrtc->base);
7189 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7190 struct amdgpu_crtc *acrtc)
7193 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7196 	 * This reads the current state for the IRQ and forcibly reapplies
7197 	 * the setting to hardware.
7199 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7203 is_scaling_state_different(const struct dm_connector_state *dm_state,
7204 const struct dm_connector_state *old_dm_state)
7206 if (dm_state->scaling != old_dm_state->scaling)
7208 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7209 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7211 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7212 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7214 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7215 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7220 #ifdef CONFIG_DRM_AMD_DC_HDCP
7221 static bool is_content_protection_different(struct drm_connector_state *state,
7222 const struct drm_connector_state *old_state,
7223 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7225 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7226 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7228 /* Handle: Type0/1 change */
7229 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7230 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7231 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7235 	/* CP is being re-enabled, ignore this.
7237 * Handles: ENABLED -> DESIRED
7239 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7240 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7241 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7245 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7247 * Handles: UNDESIRED -> ENABLED
7249 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7250 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7251 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7253 /* Stream removed and re-enabled
7255 * Can sometimes overlap with the HPD case,
7256 * thus set update_hdcp to false to avoid
7257 * setting HDCP multiple times.
7259 * Handles: DESIRED -> DESIRED (Special case)
7261 if (!(old_state->crtc && old_state->crtc->enabled) &&
7262 state->crtc && state->crtc->enabled &&
7263 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7264 dm_con_state->update_hdcp = false;
7268 /* Hot-plug, headless s3, dpms
7270 * Only start HDCP if the display is connected/enabled.
7271 	 * update_hdcp flag will be set to false until the next
7272 	 * time hotplug is detected.
7274 * Handles: DESIRED -> DESIRED (Special case)
7276 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7277 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7278 dm_con_state->update_hdcp = false;
7283 * Handles: UNDESIRED -> UNDESIRED
7284 * DESIRED -> DESIRED
7285 * ENABLED -> ENABLED
7287 if (old_state->content_protection == state->content_protection)
7291 * Handles: UNDESIRED -> DESIRED
7292 * DESIRED -> UNDESIRED
7293 * ENABLED -> UNDESIRED
7295 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7299 * Handles: DESIRED -> ENABLED
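/*
 * Net effect of the checks above (summary, mirrors the order of the code):
 * the helper returns true (HDCP must be re-run) for Type0/1 changes, for
 * the DESIRED -> DESIRED stream re-enable and hotplug special cases, for
 * UNDESIRED -> DESIRED, and for transitions out of ENABLED; it returns
 * false for no-op transitions and for DESIRED -> ENABLED, which the
 * driver performs itself once authentication succeeds.
 */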
7305 static void remove_stream(struct amdgpu_device *adev,
7306 struct amdgpu_crtc *acrtc,
7307 struct dc_stream_state *stream)
7309 /* this is the update mode case */
7311 acrtc->otg_inst = -1;
7312 acrtc->enabled = false;
7315 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7318 assert_spin_locked(&acrtc->base.dev->event_lock);
7319 WARN_ON(acrtc->event);
7321 acrtc->event = acrtc->base.state->event;
7323 /* Set the flip status */
7324 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7326 /* Mark this event as consumed */
7327 acrtc->base.state->event = NULL;
7329 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7333 static void update_freesync_state_on_stream(
7334 struct amdgpu_display_manager *dm,
7335 struct dm_crtc_state *new_crtc_state,
7336 struct dc_stream_state *new_stream,
7337 struct dc_plane_state *surface,
7338 u32 flip_timestamp_in_us)
7340 struct mod_vrr_params vrr_params;
7341 struct dc_info_packet vrr_infopacket = {0};
7342 struct amdgpu_device *adev = dm->adev;
7343 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7344 unsigned long flags;
7345 bool pack_sdp_v1_3 = false;
7351 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7352 * For now it's sufficient to just guard against these conditions.
7355 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7358 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7359 vrr_params = acrtc->dm_irq_params.vrr_params;
7362 mod_freesync_handle_preflip(
7363 dm->freesync_module,
7366 flip_timestamp_in_us,
7369 if (adev->family < AMDGPU_FAMILY_AI &&
7370 amdgpu_dm_vrr_active(new_crtc_state)) {
7371 mod_freesync_handle_v_update(dm->freesync_module,
7372 new_stream, &vrr_params);
7374 /* Need to call this before the frame ends. */
7375 dc_stream_adjust_vmin_vmax(dm->dc,
7376 new_crtc_state->stream,
7377 &vrr_params.adjust);
7381 mod_freesync_build_vrr_infopacket(
7382 dm->freesync_module,
7386 TRANSFER_FUNC_UNKNOWN,
7390 new_crtc_state->freesync_timing_changed |=
7391 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7393 sizeof(vrr_params.adjust)) != 0);
7395 new_crtc_state->freesync_vrr_info_changed |=
7396 (memcmp(&new_crtc_state->vrr_infopacket,
7398 sizeof(vrr_infopacket)) != 0);
7400 acrtc->dm_irq_params.vrr_params = vrr_params;
7401 new_crtc_state->vrr_infopacket = vrr_infopacket;
7403 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7404 new_stream->vrr_infopacket = vrr_infopacket;
7406 if (new_crtc_state->freesync_vrr_info_changed)
7407 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7408 new_crtc_state->base.crtc->base.id,
7409 (int)new_crtc_state->base.vrr_enabled,
7410 (int)vrr_params.state);
7412 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7415 static void update_stream_irq_parameters(
7416 struct amdgpu_display_manager *dm,
7417 struct dm_crtc_state *new_crtc_state)
7419 struct dc_stream_state *new_stream = new_crtc_state->stream;
7420 struct mod_vrr_params vrr_params;
7421 struct mod_freesync_config config = new_crtc_state->freesync_config;
7422 struct amdgpu_device *adev = dm->adev;
7423 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7424 unsigned long flags;
7430 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7431 * For now it's sufficient to just guard against these conditions.
7433 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7436 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7437 vrr_params = acrtc->dm_irq_params.vrr_params;
7439 if (new_crtc_state->vrr_supported &&
7440 config.min_refresh_in_uhz &&
7441 config.max_refresh_in_uhz) {
7443 		 * If a freesync compatible mode was set, config.state will
7444 		 * already have been set in atomic check.
7445 		 */
7446 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7447 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7448 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7449 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7450 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7451 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7452 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7454 config.state = new_crtc_state->base.vrr_enabled ?
7455 VRR_STATE_ACTIVE_VARIABLE :
7459 config.state = VRR_STATE_UNSUPPORTED;
7462 mod_freesync_build_vrr_params(dm->freesync_module,
7464 &config, &vrr_params);
7466 new_crtc_state->freesync_timing_changed |=
7467 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7468 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7470 new_crtc_state->freesync_config = config;
7471 /* Copy state for access from DM IRQ handler */
7472 acrtc->dm_irq_params.freesync_config = config;
7473 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7474 acrtc->dm_irq_params.vrr_params = vrr_params;
7475 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
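/*
 * Decision summary for config.state above: a freesync-compatible fixed
 * mode (absent a full modeset) keeps VRR_STATE_ACTIVE_FIXED with its fixed
 * refresh rate; otherwise a VRR-capable stream follows the vrr_enabled
 * property, and streams without VRR support or valid refresh bounds are
 * marked VRR_STATE_UNSUPPORTED.
 */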
7478 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7479 struct dm_crtc_state *new_state)
7481 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7482 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7484 if (!old_vrr_active && new_vrr_active) {
7485 		/* Transition VRR inactive -> active:
7486 		 * While VRR is active, we must not disable the vblank irq, as a
7487 		 * re-enable after a disable would compute bogus vblank/pflip
7488 		 * timestamps if the disable happened inside the display front porch.
7490 		 * We also need the vupdate irq for the actual core vblank handling
7491 		 * at the end of vblank.
7492 		 */
7493 dm_set_vupdate_irq(new_state->base.crtc, true);
7494 drm_crtc_vblank_get(new_state->base.crtc);
7495 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7496 __func__, new_state->base.crtc->base.id);
7497 } else if (old_vrr_active && !new_vrr_active) {
7498 /* Transition VRR active -> inactive:
7499 * Allow vblank irq disable again for fixed refresh rate.
7501 dm_set_vupdate_irq(new_state->base.crtc, false);
7502 drm_crtc_vblank_put(new_state->base.crtc);
7503 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7504 __func__, new_state->base.crtc->base.id);
7508 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7510 struct drm_plane *plane;
7511 struct drm_plane_state *old_plane_state;
7515 * TODO: Make this per-stream so we don't issue redundant updates for
7516 * commits with multiple streams.
7518 for_each_old_plane_in_state(state, plane, old_plane_state, i)
7519 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7520 handle_cursor_update(plane, old_plane_state);
7523 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7524 struct dc_state *dc_state,
7525 struct drm_device *dev,
7526 struct amdgpu_display_manager *dm,
7527 struct drm_crtc *pcrtc,
7528 bool wait_for_vblank)
7531 uint64_t timestamp_ns;
7532 struct drm_plane *plane;
7533 struct drm_plane_state *old_plane_state, *new_plane_state;
7534 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7535 struct drm_crtc_state *new_pcrtc_state =
7536 drm_atomic_get_new_crtc_state(state, pcrtc);
7537 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7538 struct dm_crtc_state *dm_old_crtc_state =
7539 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7540 int planes_count = 0, vpos, hpos;
7541 unsigned long flags;
7542 uint32_t target_vblank, last_flip_vblank;
7543 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7544 bool cursor_update = false;
7545 bool pflip_present = false;
7547 struct dc_surface_update surface_updates[MAX_SURFACES];
7548 struct dc_plane_info plane_infos[MAX_SURFACES];
7549 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7550 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7551 struct dc_stream_update stream_update;
7554 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7557 dm_error("Failed to allocate update bundle\n");
7562 * Disable the cursor first if we're disabling all the planes.
7563 	 * It'll remain on the screen after the planes are re-enabled
7564 	 * if we don't.
7566 if (acrtc_state->active_planes == 0)
7567 amdgpu_dm_commit_cursors(state);
7569 /* update planes when needed */
7570 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7571 struct drm_crtc *crtc = new_plane_state->crtc;
7572 struct drm_crtc_state *new_crtc_state;
7573 struct drm_framebuffer *fb = new_plane_state->fb;
7574 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7575 bool plane_needs_flip;
7576 struct dc_plane_state *dc_plane;
7577 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7579 /* Cursor plane is handled after stream updates */
7580 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7581 if ((fb && crtc == pcrtc) ||
7582 (old_plane_state->fb && old_plane_state->crtc == pcrtc))
7583 cursor_update = true;
7588 if (!fb || !crtc || pcrtc != crtc)
7591 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7592 if (!new_crtc_state->active)
7595 dc_plane = dm_new_plane_state->dc_state;
7597 bundle->surface_updates[planes_count].surface = dc_plane;
7598 if (new_pcrtc_state->color_mgmt_changed) {
7599 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7600 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7601 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7604 fill_dc_scaling_info(dm->adev, new_plane_state,
7605 &bundle->scaling_infos[planes_count]);
7607 bundle->surface_updates[planes_count].scaling_info =
7608 &bundle->scaling_infos[planes_count];
7610 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7612 pflip_present = pflip_present || plane_needs_flip;
7614 if (!plane_needs_flip) {
7619 fill_dc_plane_info_and_addr(
7620 dm->adev, new_plane_state,
7622 &bundle->plane_infos[planes_count],
7623 &bundle->flip_addrs[planes_count].address,
7624 afb->tmz_surface, false);
7626 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
7627 new_plane_state->plane->index,
7628 bundle->plane_infos[planes_count].dcc.enable);
7630 bundle->surface_updates[planes_count].plane_info =
7631 &bundle->plane_infos[planes_count];
7633 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
7635 &bundle->flip_addrs[planes_count]);
7638 		 * Only allow immediate flips for fast updates that don't
7639 		 * change the FB pitch, DCC state, rotation or mirroring.
7641 bundle->flip_addrs[planes_count].flip_immediate =
7642 crtc->state->async_flip &&
7643 acrtc_state->update_type == UPDATE_TYPE_FAST;
7645 timestamp_ns = ktime_get_ns();
7646 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7647 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7648 bundle->surface_updates[planes_count].surface = dc_plane;
7650 if (!bundle->surface_updates[planes_count].surface) {
7651 DRM_ERROR("No surface for CRTC: id=%d\n",
7652 acrtc_attach->crtc_id);
7656 if (plane == pcrtc->primary)
7657 update_freesync_state_on_stream(
7660 acrtc_state->stream,
7662 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7664 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
7666 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7667 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7673 if (pflip_present) {
7675 /* Use old throttling in non-vrr fixed refresh rate mode
7676 * to keep flip scheduling based on target vblank counts
7677 * working in a backwards compatible way, e.g., for
7678 * clients using the GLX_OML_sync_control extension or
7679 * DRI3/Present extension with defined target_msc.
7681 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7684 /* For variable refresh rate mode only:
7685 * Get vblank of last completed flip to avoid > 1 vrr
7686 * flips per video frame by use of throttling, but allow
7687 * flip programming anywhere in the possibly large
7688 * variable vrr vblank interval for fine-grained flip
7689 * timing control and more opportunity to avoid stutter
7690 * on late submission of flips.
7692 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7693 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7694 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7697 target_vblank = last_flip_vblank + wait_for_vblank;
7700 * Wait until we're out of the vertical blank period before the one
7701 * targeted by the flip
7703 while ((acrtc_attach->enabled &&
7704 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7705 0, &vpos, &hpos, NULL,
7706 NULL, &pcrtc->hwmode)
7707 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7708 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7709 (int)(target_vblank -
7710 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7711 usleep_range(1000, 1100);
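	/*
	 * Illustration of the throttle above: with wait_for_vblank set,
	 * target_vblank is last_flip_vblank + 1. If the previous flip
	 * completed in vblank N, this loop polls in roughly 1 ms steps for
	 * as long as scanout is still inside a vblank preceding vblank
	 * N + 1, capping flips at one per refresh cycle in fixed refresh
	 * rate mode.
	 */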
7715 * Prepare the flip event for the pageflip interrupt to handle.
7717 * This only works in the case where we've already turned on the
7718 	 * appropriate hardware blocks (e.g. HUBP) so in the transition case
7719 * from 0 -> n planes we have to skip a hardware generated event
7720 * and rely on sending it from software.
7722 if (acrtc_attach->base.state->event &&
7723 acrtc_state->active_planes > 0) {
7724 drm_crtc_vblank_get(pcrtc);
7726 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7728 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7729 prepare_flip_isr(acrtc_attach);
7731 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7734 if (acrtc_state->stream) {
7735 if (acrtc_state->freesync_vrr_info_changed)
7736 bundle->stream_update.vrr_infopacket =
7737 &acrtc_state->stream->vrr_infopacket;
7739 } else if (cursor_update && acrtc_state->active_planes > 0 &&
7740 acrtc_attach->base.state->event) {
7741 drm_crtc_vblank_get(pcrtc);
7743 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7745 acrtc_attach->event = acrtc_attach->base.state->event;
7746 acrtc_attach->base.state->event = NULL;
7748 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7751 /* Update the planes if changed or disable if we don't have any. */
7752 if ((planes_count || acrtc_state->active_planes == 0) &&
7753 acrtc_state->stream) {
7755 * If PSR or idle optimizations are enabled then flush out
7756 * any pending work before hardware programming.
7758 if (dm->vblank_control_workqueue)
7759 flush_workqueue(dm->vblank_control_workqueue);
7761 bundle->stream_update.stream = acrtc_state->stream;
7762 if (new_pcrtc_state->mode_changed) {
7763 bundle->stream_update.src = acrtc_state->stream->src;
7764 bundle->stream_update.dst = acrtc_state->stream->dst;
7767 if (new_pcrtc_state->color_mgmt_changed) {
7769 * TODO: This isn't fully correct since we've actually
7770 * already modified the stream in place.
7772 bundle->stream_update.gamut_remap =
7773 &acrtc_state->stream->gamut_remap_matrix;
7774 bundle->stream_update.output_csc_transform =
7775 &acrtc_state->stream->csc_color_matrix;
7776 bundle->stream_update.out_transfer_func =
7777 acrtc_state->stream->out_transfer_func;
7780 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7781 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7782 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7785 * If FreeSync state on the stream has changed then we need to
7786 * re-adjust the min/max bounds now that DC doesn't handle this
7787 * as part of commit.
7789 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
7790 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7791 dc_stream_adjust_vmin_vmax(
7792 dm->dc, acrtc_state->stream,
7793 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7794 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7796 mutex_lock(&dm->dc_lock);
7797 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7798 acrtc_state->stream->link->psr_settings.psr_allow_active)
7799 amdgpu_dm_psr_disable(acrtc_state->stream);
7801 dc_commit_updates_for_stream(dm->dc,
7802 bundle->surface_updates,
7804 acrtc_state->stream,
7805 &bundle->stream_update,
7809 * Enable or disable the interrupts on the backend.
7811 * Most pipes are put into power gating when unused.
7813 * When power gating is enabled on a pipe we lose the
7814 * interrupt enablement state when power gating is disabled.
7816 * So we need to update the IRQ control state in hardware
7817 * whenever the pipe turns on (since it could be previously
7818 			 * power gated) or off (since some pipes can't be power gated
7819 			 * on some ASICs).
7820 			 */
7821 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7822 dm_update_pflip_irq_state(drm_to_adev(dev),
7825 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7826 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7827 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7828 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7830 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
7831 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
7832 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
7833 struct amdgpu_dm_connector *aconn =
7834 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
7836 if (aconn->psr_skip_count > 0)
7837 aconn->psr_skip_count--;
7839 /* Allow PSR when skip count is 0. */
7840 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
7843 * If sink supports PSR SU, there is no need to rely on
7844 * a vblank event disable request to enable PSR. PSR SU
7845 * can be enabled immediately once OS demonstrates an
7846 * adequate number of fast atomic commits to notify KMD
7847 * of update events. See `vblank_control_worker()`.
7849 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
7850 acrtc_attach->dm_irq_params.allow_psr_entry &&
7851 !acrtc_state->stream->link->psr_settings.psr_allow_active)
7852 amdgpu_dm_psr_enable(acrtc_state->stream);
7854 acrtc_attach->dm_irq_params.allow_psr_entry = false;
7857 mutex_unlock(&dm->dc_lock);
7861 * Update cursor state *after* programming all the planes.
7862 	 * This avoids redundant programming in the case where we're going
7863 	 * to be disabling a single plane, since those pipes are being disabled anyway.
7865 if (acrtc_state->active_planes)
7866 amdgpu_dm_commit_cursors(state);
7872 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7873 struct drm_atomic_state *state)
7875 struct amdgpu_device *adev = drm_to_adev(dev);
7876 struct amdgpu_dm_connector *aconnector;
7877 struct drm_connector *connector;
7878 struct drm_connector_state *old_con_state, *new_con_state;
7879 struct drm_crtc_state *new_crtc_state;
7880 struct dm_crtc_state *new_dm_crtc_state;
7881 const struct dc_stream_status *status;
7884 /* Notify device removals. */
7885 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7886 if (old_con_state->crtc != new_con_state->crtc) {
7887 /* CRTC changes require notification. */
7891 if (!new_con_state->crtc)
7894 new_crtc_state = drm_atomic_get_new_crtc_state(
7895 state, new_con_state->crtc);
7897 if (!new_crtc_state)
7900 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7904 aconnector = to_amdgpu_dm_connector(connector);
7906 mutex_lock(&adev->dm.audio_lock);
7907 inst = aconnector->audio_inst;
7908 aconnector->audio_inst = -1;
7909 mutex_unlock(&adev->dm.audio_lock);
7911 amdgpu_dm_audio_eld_notify(adev, inst);
7914 /* Notify audio device additions. */
7915 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7916 if (!new_con_state->crtc)
7919 new_crtc_state = drm_atomic_get_new_crtc_state(
7920 state, new_con_state->crtc);
7922 if (!new_crtc_state)
7925 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7928 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7929 if (!new_dm_crtc_state->stream)
7932 status = dc_stream_get_status(new_dm_crtc_state->stream);
7936 aconnector = to_amdgpu_dm_connector(connector);
7938 mutex_lock(&adev->dm.audio_lock);
7939 inst = status->audio_inst;
7940 aconnector->audio_inst = inst;
7941 mutex_unlock(&adev->dm.audio_lock);
7943 amdgpu_dm_audio_eld_notify(adev, inst);
7948 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7949 * @crtc_state: the DRM CRTC state
7950 * @stream_state: the DC stream state.
7952 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7953 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7955 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7956 struct dc_stream_state *stream_state)
7958 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7962 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7963 * @state: The atomic state to commit
7965 * This will tell DC to commit the constructed DC state from atomic_check,
7966 * programming the hardware. Any failures here implies a hardware failure, since
7967 * atomic check should have filtered anything non-kosher.
7969 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7971 struct drm_device *dev = state->dev;
7972 struct amdgpu_device *adev = drm_to_adev(dev);
7973 struct amdgpu_display_manager *dm = &adev->dm;
7974 struct dm_atomic_state *dm_state;
7975 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7977 struct drm_crtc *crtc;
7978 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7979 unsigned long flags;
7980 bool wait_for_vblank = true;
7981 struct drm_connector *connector;
7982 struct drm_connector_state *old_con_state, *new_con_state;
7983 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7984 int crtc_disable_count = 0;
7985 bool mode_set_reset_required = false;
7988 trace_amdgpu_dm_atomic_commit_tail_begin(state);
7990 r = drm_atomic_helper_wait_for_fences(dev, state, false);
7992 DRM_ERROR("Waiting for fences timed out!");
7994 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7996 dm_state = dm_atomic_get_new_state(state);
7997 if (dm_state && dm_state->context) {
7998 dc_state = dm_state->context;
8000 /* No state changes, retain current state. */
8001 dc_state_temp = dc_create_state(dm->dc);
8002 ASSERT(dc_state_temp);
8003 dc_state = dc_state_temp;
8004 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8007 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8008 new_crtc_state, i) {
8009 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8011 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8013 if (old_crtc_state->active &&
8014 (!new_crtc_state->active ||
8015 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8016 manage_dm_interrupts(adev, acrtc, false);
8017 dc_stream_release(dm_old_crtc_state->stream);
8021 drm_atomic_helper_calc_timestamping_constants(state);
8023 /* update changed items */
8024 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8025 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8027 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8028 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8030 drm_dbg_state(state->dev,
8031 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8032 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
8033 			"connectors_changed:%d\n",
8035 new_crtc_state->enable,
8036 new_crtc_state->active,
8037 new_crtc_state->planes_changed,
8038 new_crtc_state->mode_changed,
8039 new_crtc_state->active_changed,
8040 new_crtc_state->connectors_changed);
8042 /* Disable cursor if disabling crtc */
8043 if (old_crtc_state->active && !new_crtc_state->active) {
8044 struct dc_cursor_position position;
8046 memset(&position, 0, sizeof(position));
8047 mutex_lock(&dm->dc_lock);
8048 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8049 mutex_unlock(&dm->dc_lock);
8052 /* Copy all transient state flags into dc state */
8053 if (dm_new_crtc_state->stream) {
8054 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8055 dm_new_crtc_state->stream);
8058 /* handles headless hotplug case, updating new_state and
8059 * aconnector as needed
8062 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8064 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8066 if (!dm_new_crtc_state->stream) {
8068 				 * This could happen because of issues with
8069 				 * userspace notification delivery.
8070 				 * In this case userspace tries to set a mode on
8071 				 * a display which is in fact disconnected.
8072 				 * dc_sink is NULL on the aconnector in this case.
8073 				 * We expect a reset mode to come soon.
8075 				 * This can also happen when an unplug occurs
8076 				 * while the resume sequence is still ending.
8078 				 * In either case, we want to pretend we still
8079 				 * have a sink to keep the pipe running, so that
8080 				 * the hw state stays consistent with the sw state.
8082 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8083 __func__, acrtc->base.base.id);
8087 if (dm_old_crtc_state->stream)
8088 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8090 pm_runtime_get_noresume(dev->dev);
8092 acrtc->enabled = true;
8093 acrtc->hw_mode = new_crtc_state->mode;
8094 crtc->hwmode = new_crtc_state->mode;
8095 mode_set_reset_required = true;
8096 } else if (modereset_required(new_crtc_state)) {
8097 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8098 /* i.e. reset mode */
8099 if (dm_old_crtc_state->stream)
8100 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8102 mode_set_reset_required = true;
8104 } /* for_each_crtc_in_state() */
8107 	/* If there was a mode set or reset, disable the eDP PSR. */
8108 if (mode_set_reset_required) {
8109 if (dm->vblank_control_workqueue)
8110 flush_workqueue(dm->vblank_control_workqueue);
8112 amdgpu_dm_psr_disable_all(dm);
8115 dm_enable_per_frame_crtc_master_sync(dc_state);
8116 mutex_lock(&dm->dc_lock);
8117 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8119 /* Allow idle optimization when vblank count is 0 for display off */
8120 if (dm->active_vblank_irq_count == 0)
8121 dc_allow_idle_optimizations(dm->dc, true);
8122 mutex_unlock(&dm->dc_lock);
8125 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8126 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8128 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8130 if (dm_new_crtc_state->stream != NULL) {
8131 const struct dc_stream_status *status =
8132 dc_stream_get_status(dm_new_crtc_state->stream);
8134 if (!status)
8135 status = dc_stream_get_status_from_state(dc_state,
8136 dm_new_crtc_state->stream);
8137 if (!status)
8138 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8139 else
8140 acrtc->otg_inst = status->primary_otg_inst;
8141 }
8142 }
8143 #ifdef CONFIG_DRM_AMD_DC_HDCP
8144 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8145 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8146 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8147 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8149 new_crtc_state = NULL;
8151 if (acrtc)
8152 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8154 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8156 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8157 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8158 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8159 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8160 dm_new_con_state->update_hdcp = true;
8161 continue;
8162 }
8164 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8165 hdcp_update_display(
8166 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8167 new_con_state->hdcp_content_type,
8168 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8169 }
8170 #endif
8172 /* Handle connector state changes */
8173 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8174 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8175 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8176 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8177 struct dc_surface_update dummy_updates[MAX_SURFACES];
8178 struct dc_stream_update stream_update;
8179 struct dc_info_packet hdr_packet;
8180 struct dc_stream_status *status = NULL;
8181 bool abm_changed, hdr_changed, scaling_changed;
8183 memset(&dummy_updates, 0, sizeof(dummy_updates));
8184 memset(&stream_update, 0, sizeof(stream_update));
8187 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8188 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8191 /* Skip any modesets/resets */
8192 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8193 continue;
8195 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8196 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8198 scaling_changed = is_scaling_state_different(dm_new_con_state,
8199 dm_old_con_state);
8201 abm_changed = dm_new_crtc_state->abm_level !=
8202 dm_old_crtc_state->abm_level;
8204 hdr_changed =
8205 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8207 if (!scaling_changed && !abm_changed && !hdr_changed)
8208 continue;
8210 stream_update.stream = dm_new_crtc_state->stream;
8211 if (scaling_changed) {
8212 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8213 dm_new_con_state, dm_new_crtc_state->stream);
8215 stream_update.src = dm_new_crtc_state->stream->src;
8216 stream_update.dst = dm_new_crtc_state->stream->dst;
8217 }
8219 if (abm_changed) {
8220 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8222 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8223 }
8225 if (hdr_changed) {
8226 fill_hdr_info_packet(new_con_state, &hdr_packet);
8227 stream_update.hdr_static_metadata = &hdr_packet;
8228 }
8230 status = dc_stream_get_status(dm_new_crtc_state->stream);
8232 if (WARN_ON(!status))
8233 continue;
8235 WARN_ON(!status->plane_count);
8237 /*
8238 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8239 * Here we create an empty update on each plane.
8240 * To fix this, DC should permit updating only stream properties.
8241 */
8242 for (j = 0; j < status->plane_count; j++)
8243 dummy_updates[j].surface = status->plane_states[0];
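/*
 * Note: every dummy slot points at the first plane state; per the
 * TODO above, DC only needs a non-empty surface list to accept a
 * stream-only update.
 */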
8246 mutex_lock(&dm->dc_lock);
8247 dc_commit_updates_for_stream(dm->dc,
8248 dummy_updates,
8249 status->plane_count,
8250 dm_new_crtc_state->stream,
8251 &stream_update,
8252 dc_state);
8253 mutex_unlock(&dm->dc_lock);
8256 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8257 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8258 new_crtc_state, i) {
8259 if (old_crtc_state->active && !new_crtc_state->active)
8260 crtc_disable_count++;
8262 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8263 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8265 /* For freesync config update on crtc state and params for irq */
8266 update_stream_irq_parameters(dm, dm_new_crtc_state);
8268 /* Handle vrr on->off / off->on transitions */
8269 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8270 dm_new_crtc_state);
8271 }
8273 /*
8274 * Enable interrupts for CRTCs that are newly enabled or went through
8275 * a modeset. It was intentionally deferred until after the front end
8276 * state was modified to wait until the OTG was on and so the IRQ
8277 * handlers didn't access stale or invalid state.
8278 */
8279 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8280 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8281 #ifdef CONFIG_DEBUG_FS
8282 bool configure_crc = false;
8283 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8284 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8285 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
8286 #endif
8287 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8288 cur_crc_src = acrtc->dm_irq_params.crc_src;
8289 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8290 #endif
8291 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8293 if (new_crtc_state->active &&
8294 (!old_crtc_state->active ||
8295 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8296 dc_stream_retain(dm_new_crtc_state->stream);
8297 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8298 manage_dm_interrupts(adev, acrtc, true);
8300 #ifdef CONFIG_DEBUG_FS
8301 /*
8302 * Frontend may have changed so reapply the CRC capture
8303 * settings for the stream.
8304 */
8305 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8307 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8308 configure_crc = true;
8309 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8310 if (amdgpu_dm_crc_window_is_activated(crtc)) {
8311 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8312 acrtc->dm_irq_params.crc_window.update_win = true;
8313 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
8314 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
8315 crc_rd_wrk->crtc = crtc;
8316 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
8317 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8318 }
8319 #endif
8320 }
8322 if (configure_crc)
8323 if (amdgpu_dm_crtc_configure_crc_source(
8324 crtc, dm_new_crtc_state, cur_crc_src))
8325 DRM_DEBUG_DRIVER("Failed to configure crc source");
8326 #endif
8327 }
8330 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8331 if (new_crtc_state->async_flip)
8332 wait_for_vblank = false;
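/*
 * Async flips must not stall on vblank, so the flip-done wait below
 * is skipped whenever any CRTC in the state requested one.
 */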
8334 /* Update planes when needed, per CRTC */
8335 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8336 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8338 if (dm_new_crtc_state->stream)
8339 amdgpu_dm_commit_planes(state, dc_state, dev,
8340 dm, crtc, wait_for_vblank);
8343 /* Update audio instances for each connector. */
8344 amdgpu_dm_commit_audio(dev, state);
8346 /* restore the backlight level */
8347 for (i = 0; i < dm->num_of_edps; i++) {
8348 if (dm->backlight_dev[i] &&
8349 (dm->actual_brightness[i] != dm->brightness[i]))
8350 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
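/*
 * dm->actual_brightness holds the level last applied to the panel,
 * so the backlight is only reprogrammed when it has drifted from
 * the cached target in dm->brightness (e.g. across a modeset).
 */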
8353 /*
8354 * Send a vblank event for every CRTC event not already handled in the
8355 * flip path, and mark the events consumed for drm_atomic_helper_commit_hw_done().
8356 */
8357 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8358 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8360 if (new_crtc_state->event)
8361 drm_send_event_locked(dev, &new_crtc_state->event->base);
8363 new_crtc_state->event = NULL;
8365 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8367 /* Signal HW programming completion */
8368 drm_atomic_helper_commit_hw_done(state);
8370 if (wait_for_vblank)
8371 drm_atomic_helper_wait_for_flip_done(dev, state);
8373 drm_atomic_helper_cleanup_planes(dev, state);
8375 /* return the stolen vga memory back to VRAM */
8376 if (!adev->mman.keep_stolen_vga_memory)
8377 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8378 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8380 /*
8381 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8382 * so we can put the GPU into runtime suspend if we're not driving any
8383 * displays anymore.
8384 */
8385 for (i = 0; i < crtc_disable_count; i++)
8386 pm_runtime_put_autosuspend(dev->dev);
8387 pm_runtime_mark_last_busy(dev->dev);
8389 if (dc_state_temp)
8390 dc_release_state(dc_state_temp);
8391 }
8394 static int dm_force_atomic_commit(struct drm_connector *connector)
8397 struct drm_device *ddev = connector->dev;
8398 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8399 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8400 struct drm_plane *plane = disconnected_acrtc->base.primary;
8401 struct drm_connector_state *conn_state;
8402 struct drm_crtc_state *crtc_state;
8403 struct drm_plane_state *plane_state;
8404 int ret = 0;
8406 if (!state)
8407 return -ENOMEM;
8408 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8410 /* Construct an atomic state to restore previous display setting */
8413 * Attach connectors to drm_atomic_state
8415 conn_state = drm_atomic_get_connector_state(state, connector);
8417 ret = PTR_ERR_OR_ZERO(conn_state);
8421 /* Attach crtc to drm_atomic_state*/
8422 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8424 ret = PTR_ERR_OR_ZERO(crtc_state);
8428 /* force a restore */
8429 crtc_state->mode_changed = true;
8431 /* Attach plane to drm_atomic_state */
8432 plane_state = drm_atomic_get_plane_state(state, plane);
8434 ret = PTR_ERR_OR_ZERO(plane_state);
8438 /* Call commit internally with the state we just constructed */
8439 ret = drm_atomic_commit(state);
8441 out:
8442 drm_atomic_state_put(state);
8443 if (ret)
8444 DRM_ERROR("Restoring old state failed with %i\n", ret);
8446 return ret;
8447 }
8449 /*
8450 * This function handles all cases when set mode does not come upon hotplug.
8451 * This includes when a display is unplugged then plugged back into the
8452 * same port and when running without usermode desktop manager support.
8453 */
8454 void dm_restore_drm_connector_state(struct drm_device *dev,
8455 struct drm_connector *connector)
8457 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8458 struct amdgpu_crtc *disconnected_acrtc;
8459 struct dm_crtc_state *acrtc_state;
8461 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8462 return;
8464 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8465 if (!disconnected_acrtc)
8466 return;
8468 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8469 if (!acrtc_state->stream)
8470 return;
8472 /*
8473 * If the previous sink is not released and different from the current,
8474 * we deduce we are in a state where we cannot rely on usermode call
8475 * to turn on the display, so we do it here
8476 */
8477 if (acrtc_state->stream->sink != aconnector->dc_sink)
8478 dm_force_atomic_commit(&aconnector->base);
8481 /*
8482 * Grabs all modesetting locks to serialize against any blocking commits.
8483 * Waits for completion of all non-blocking commits.
8484 */
8485 static int do_aquire_global_lock(struct drm_device *dev,
8486 struct drm_atomic_state *state)
8488 struct drm_crtc *crtc;
8489 struct drm_crtc_commit *commit;
8490 long ret;
8492 /*
8493 * Adding all modeset locks to acquire_ctx ensures that, when the
8494 * framework releases the context, the extra locks we take here are
8495 * released as well.
8496 */
8497 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8501 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8502 spin_lock(&crtc->commit_lock);
8503 commit = list_first_entry_or_null(&crtc->commit_list,
8504 struct drm_crtc_commit, commit_entry);
8505 if (commit)
8506 drm_crtc_commit_get(commit);
8507 spin_unlock(&crtc->commit_lock);
8509 if (!commit)
8510 continue;
8512 /*
8513 * Make sure all pending HW programming has completed and all
8514 * page flips are done before we continue.
8515 */
8516 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8518 if (ret > 0)
8519 ret = wait_for_completion_interruptible_timeout(
8520 &commit->flip_done, 10*HZ);
8522 if (ret == 0)
8523 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
8524 crtc->base.id, crtc->name);
8526 drm_crtc_commit_put(commit);
8529 return ret < 0 ? ret : 0;
8532 static void get_freesync_config_for_crtc(
8533 struct dm_crtc_state *new_crtc_state,
8534 struct dm_connector_state *new_con_state)
8536 struct mod_freesync_config config = {0};
8537 struct amdgpu_dm_connector *aconnector =
8538 to_amdgpu_dm_connector(new_con_state->base.connector);
8539 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8540 int vrefresh = drm_mode_vrefresh(mode);
8541 bool fs_vid_mode = false;
8543 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8544 vrefresh >= aconnector->min_vfreq &&
8545 vrefresh <= aconnector->max_vfreq;
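/*
 * VRR can only engage when the committed mode's nominal refresh rate
 * falls inside the FreeSync range the panel advertised.
 */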
8547 if (new_crtc_state->vrr_supported) {
8548 new_crtc_state->stream->ignore_msa_timing_param = true;
8549 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
8551 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
8552 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
8553 config.vsif_supported = true;
8554 config.btr = true;
8556 if (fs_vid_mode) {
8557 config.state = VRR_STATE_ACTIVE_FIXED;
8558 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
8559 goto out;
8560 } else if (new_crtc_state->base.vrr_enabled) {
8561 config.state = VRR_STATE_ACTIVE_VARIABLE;
8562 } else {
8563 config.state = VRR_STATE_INACTIVE;
8564 }
8565 }
8566 out:
8567 new_crtc_state->freesync_config = config;
8570 static void reset_freesync_config_for_crtc(
8571 struct dm_crtc_state *new_crtc_state)
8573 new_crtc_state->vrr_supported = false;
8575 memset(&new_crtc_state->vrr_infopacket, 0,
8576 sizeof(new_crtc_state->vrr_infopacket));
8579 static bool
8580 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
8581 struct drm_crtc_state *new_crtc_state)
8582 {
8583 const struct drm_display_mode *old_mode, *new_mode;
8585 if (!old_crtc_state || !new_crtc_state)
8586 return false;
8588 old_mode = &old_crtc_state->mode;
8589 new_mode = &new_crtc_state->mode;
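/*
 * The mix of == and != below is deliberate: pixel clock and all
 * horizontal timing must match exactly, while vtotal and the vsync
 * position are expected to differ (with an identical vsync width).
 * That pattern is a pure vertical front porch change, which freesync
 * video can absorb without a full modeset.
 */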
8591 if (old_mode->clock == new_mode->clock &&
8592 old_mode->hdisplay == new_mode->hdisplay &&
8593 old_mode->vdisplay == new_mode->vdisplay &&
8594 old_mode->htotal == new_mode->htotal &&
8595 old_mode->vtotal != new_mode->vtotal &&
8596 old_mode->hsync_start == new_mode->hsync_start &&
8597 old_mode->vsync_start != new_mode->vsync_start &&
8598 old_mode->hsync_end == new_mode->hsync_end &&
8599 old_mode->vsync_end != new_mode->vsync_end &&
8600 old_mode->hskew == new_mode->hskew &&
8601 old_mode->vscan == new_mode->vscan &&
8602 (old_mode->vsync_end - old_mode->vsync_start) ==
8603 (new_mode->vsync_end - new_mode->vsync_start))
8604 return true;
8606 return false;
8607 }
8609 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
8610 uint64_t num, den, res;
8611 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
8613 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
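/*
 * Fixed refresh in uHz = pixel clock / (htotal * vtotal).
 * mode.clock is in kHz, hence the factor of 1000, and the extra 1e6
 * scales the result to micro-Hz before the division.
 */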
8615 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
8616 den = (unsigned long long)new_crtc_state->mode.htotal *
8617 (unsigned long long)new_crtc_state->mode.vtotal;
8619 res = div_u64(num, den);
8620 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
8623 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8624 struct drm_atomic_state *state,
8625 struct drm_crtc *crtc,
8626 struct drm_crtc_state *old_crtc_state,
8627 struct drm_crtc_state *new_crtc_state,
8629 bool *lock_and_validation_needed)
8631 struct dm_atomic_state *dm_state = NULL;
8632 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8633 struct dc_stream_state *new_stream;
8637 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8638 * update changed items
8640 struct amdgpu_crtc *acrtc = NULL;
8641 struct amdgpu_dm_connector *aconnector = NULL;
8642 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8643 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8647 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8648 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8649 acrtc = to_amdgpu_crtc(crtc);
8650 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8652 /* TODO This hack should go away */
8653 if (aconnector && enable) {
8654 /* Make sure fake sink is created in plug-in scenario */
8655 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8656 &aconnector->base);
8657 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8658 &aconnector->base);
8660 if (IS_ERR(drm_new_conn_state)) {
8661 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8662 goto fail;
8663 }
8665 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8666 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8668 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8669 goto skip_modeset;
8671 new_stream = create_validate_stream_for_sink(aconnector,
8672 &new_crtc_state->mode,
8673 dm_new_conn_state,
8674 dm_old_crtc_state->stream);
8676 /*
8677 * we can have no stream on ACTION_SET if a display
8678 * was disconnected during S3, in this case it is not an
8679 * error, the OS will be updated after detection, and
8680 * will do the right thing on next atomic commit
8681 */
8683 if (!new_stream) {
8684 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8685 __func__, acrtc->base.base.id);
8686 ret = -ENOMEM;
8687 goto fail;
8688 }
8690 /*
8691 * TODO: Check VSDB bits to decide whether this should
8692 * be enabled or not.
8693 */
8694 new_stream->triggered_crtc_reset.enabled =
8695 dm->force_timing_sync;
8697 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8699 ret = fill_hdr_info_packet(drm_new_conn_state,
8700 &new_stream->hdr_static_metadata);
8701 if (ret)
8702 goto fail;
8704 /*
8705 * If we already removed the old stream from the context
8706 * (and set the new stream to NULL) then we can't reuse
8707 * the old stream even if the stream and scaling are unchanged.
8708 * We'll hit the BUG_ON and black screen.
8709 *
8710 * TODO: Refactor this function to allow this check to work
8711 * in all conditions.
8712 */
8713 if (dm_new_crtc_state->stream &&
8714 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
8715 goto skip_modeset;
8717 if (dm_new_crtc_state->stream &&
8718 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8719 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8720 new_crtc_state->mode_changed = false;
8721 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8722 new_crtc_state->mode_changed);
8726 /* mode_changed flag may get updated above, need to check again */
8727 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8728 goto skip_modeset;
8730 drm_dbg_state(state->dev,
8731 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8732 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8733 "connectors_changed:%d\n",
8735 new_crtc_state->enable,
8736 new_crtc_state->active,
8737 new_crtc_state->planes_changed,
8738 new_crtc_state->mode_changed,
8739 new_crtc_state->active_changed,
8740 new_crtc_state->connectors_changed);
8742 /* Remove stream for any changed/disabled CRTC */
8743 if (!enable) {
8745 if (!dm_old_crtc_state->stream)
8746 goto skip_modeset;
8748 if (dm_new_crtc_state->stream &&
8749 is_timing_unchanged_for_freesync(new_crtc_state,
8751 new_crtc_state->mode_changed = false;
8753 "Mode change not required for front porch change, "
8754 "setting mode_changed to %d",
8755 new_crtc_state->mode_changed);
8757 set_freesync_fixed_config(dm_new_crtc_state);
8759 goto skip_modeset;
8760 } else if (aconnector &&
8761 is_freesync_video_mode(&new_crtc_state->mode,
8762 aconnector)) {
8763 struct drm_display_mode *high_mode;
8765 high_mode = get_highest_refresh_rate_mode(aconnector, false);
8766 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
8767 set_freesync_fixed_config(dm_new_crtc_state);
8768 }
8769 }
8771 ret = dm_atomic_get_state(state, &dm_state);
8772 if (ret)
8773 goto fail;
8775 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8778 /* i.e. reset mode */
8779 if (dc_remove_stream_from_ctx(
8780 dm->dc,
8781 dm_state->context,
8782 dm_old_crtc_state->stream) != DC_OK) {
8783 ret = -EINVAL;
8784 goto fail;
8785 }
8787 dc_stream_release(dm_old_crtc_state->stream);
8788 dm_new_crtc_state->stream = NULL;
8790 reset_freesync_config_for_crtc(dm_new_crtc_state);
8792 *lock_and_validation_needed = true;
8794 } else { /* Add stream for any updated/enabled CRTC */
8795 /*
8796 * Quick fix to prevent NULL pointer on new_stream when
8797 * added MST connectors not found in existing crtc_state in the chained mode
8798 * TODO: need to dig out the root cause of that
8799 */
8800 if (!aconnector)
8801 goto skip_modeset;
8803 if (modereset_required(new_crtc_state))
8804 goto skip_modeset;
8806 if (modeset_required(new_crtc_state, new_stream,
8807 dm_old_crtc_state->stream)) {
8809 WARN_ON(dm_new_crtc_state->stream);
8811 ret = dm_atomic_get_state(state, &dm_state);
8812 if (ret)
8813 goto fail;
8815 dm_new_crtc_state->stream = new_stream;
8817 dc_stream_retain(new_stream);
8819 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
8822 if (dc_add_stream_to_ctx(
8825 dm_new_crtc_state->stream) != DC_OK) {
8830 *lock_and_validation_needed = true;
8831 }
8832 }
8834 skip_modeset:
8835 /* Release extra reference */
8836 if (new_stream)
8837 dc_stream_release(new_stream);
8839 /*
8840 * We want to do dc stream updates that do not require a
8841 * full modeset below.
8842 */
8843 if (!(enable && aconnector && new_crtc_state->active))
8844 return 0;
8845 /*
8846 * Given above conditions, the dc state cannot be NULL because:
8847 * 1. We're in the process of enabling CRTCs (just been added
8848 * to the dc context, or already is on the context)
8849 * 2. Has a valid connector attached, and
8850 * 3. Is currently active and enabled.
8851 * => The dc stream state currently exists.
8852 */
8853 BUG_ON(dm_new_crtc_state->stream == NULL);
8855 /* Scaling or underscan settings */
8856 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8857 drm_atomic_crtc_needs_modeset(new_crtc_state))
8858 update_stream_scaling_settings(
8859 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8862 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8865 * Color management settings. We also update color properties
8866 * when a modeset is needed, to ensure it gets reprogrammed.
8868 if (dm_new_crtc_state->base.color_mgmt_changed ||
8869 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8870 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8871 if (ret)
8872 goto fail;
8873 }
8875 /* Update Freesync settings. */
8876 get_freesync_config_for_crtc(dm_new_crtc_state,
8877 dm_new_conn_state);
8879 return ret;
8881 fail:
8882 if (new_stream)
8883 dc_stream_release(new_stream);
8885 return ret;
8886 }
8887 static bool should_reset_plane(struct drm_atomic_state *state,
8888 struct drm_plane *plane,
8889 struct drm_plane_state *old_plane_state,
8890 struct drm_plane_state *new_plane_state)
8892 struct drm_plane *other;
8893 struct drm_plane_state *old_other_state, *new_other_state;
8894 struct drm_crtc_state *new_crtc_state;
8895 int i;
8897 /*
8898 * TODO: Remove this hack once the checks below are sufficient
8899 * to determine when we need to reset all the planes on
8900 * the stream.
8901 */
8902 if (state->allow_modeset)
8903 return true;
8905 /* Exit early if we know that we're adding or removing the plane. */
8906 if (old_plane_state->crtc != new_plane_state->crtc)
8907 return true;
8909 /* old crtc == new_crtc == NULL, plane not in context. */
8910 if (!new_plane_state->crtc)
8911 return false;
8913 new_crtc_state =
8914 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8916 if (!new_crtc_state)
8917 return true;
8919 /* CRTC Degamma changes currently require us to recreate planes. */
8920 if (new_crtc_state->color_mgmt_changed)
8921 return true;
8923 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8924 return true;
8926 /*
8927 * If there are any new primary or overlay planes being added or
8928 * removed then the z-order can potentially change. To ensure
8929 * correct z-order and pipe acquisition the current DC architecture
8930 * requires us to remove and recreate all existing planes.
8932 * TODO: Come up with a more elegant solution for this.
8933 */
8934 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8935 struct amdgpu_framebuffer *old_afb, *new_afb;
8936 if (other->type == DRM_PLANE_TYPE_CURSOR)
8937 continue;
8939 if (old_other_state->crtc != new_plane_state->crtc &&
8940 new_other_state->crtc != new_plane_state->crtc)
8941 continue;
8943 if (old_other_state->crtc != new_other_state->crtc)
8944 return true;
8946 /* Src/dst size and scaling updates. */
8947 if (old_other_state->src_w != new_other_state->src_w ||
8948 old_other_state->src_h != new_other_state->src_h ||
8949 old_other_state->crtc_w != new_other_state->crtc_w ||
8950 old_other_state->crtc_h != new_other_state->crtc_h)
8951 return true;
8953 /* Rotation / mirroring updates. */
8954 if (old_other_state->rotation != new_other_state->rotation)
8955 return true;
8957 /* Blending updates. */
8958 if (old_other_state->pixel_blend_mode !=
8959 new_other_state->pixel_blend_mode)
8960 return true;
8962 /* Alpha updates. */
8963 if (old_other_state->alpha != new_other_state->alpha)
8964 return true;
8966 /* Colorspace changes. */
8967 if (old_other_state->color_range != new_other_state->color_range ||
8968 old_other_state->color_encoding != new_other_state->color_encoding)
8969 return true;
8971 /* Framebuffer checks fall at the end. */
8972 if (!old_other_state->fb || !new_other_state->fb)
8973 continue;
8975 /* Pixel format changes can require bandwidth updates. */
8976 if (old_other_state->fb->format != new_other_state->fb->format)
8977 return true;
8979 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8980 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8982 /* Tiling and DCC changes also require bandwidth updates. */
8983 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8984 old_afb->base.modifier != new_afb->base.modifier)
8985 return true;
8986 }
8988 return false;
8989 }
8991 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8992 struct drm_plane_state *new_plane_state,
8993 struct drm_framebuffer *fb)
8995 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8996 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9000 if (fb->width > new_acrtc->max_cursor_width ||
9001 fb->height > new_acrtc->max_cursor_height) {
9002 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9003 new_plane_state->fb->width,
9004 new_plane_state->fb->height);
9005 return -EINVAL;
9006 }
9007 if (new_plane_state->src_w != fb->width << 16 ||
9008 new_plane_state->src_h != fb->height << 16) {
9009 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9013 /* Pitch in pixels */
9014 pitch = fb->pitches[0] / fb->format->cpp[0];
9016 if (fb->width != pitch) {
9017 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9026 /* FB pitch is supported by cursor plane */
9029 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9033 /* Core DRM takes care of checking FB modifiers, so we only need to
9034 * check tiling flags when the FB doesn't have a modifier. */
9035 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9036 if (adev->family < AMDGPU_FAMILY_AI) {
9037 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9038 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9039 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9040 } else {
9041 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9042 }
9043 if (!linear) {
9044 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9045 return -EINVAL;
9046 }
9047 }
9049 return 0;
9050 }
9052 static int dm_update_plane_state(struct dc *dc,
9053 struct drm_atomic_state *state,
9054 struct drm_plane *plane,
9055 struct drm_plane_state *old_plane_state,
9056 struct drm_plane_state *new_plane_state,
9058 bool *lock_and_validation_needed)
9061 struct dm_atomic_state *dm_state = NULL;
9062 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9063 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9064 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9065 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9066 struct amdgpu_crtc *new_acrtc;
9071 new_plane_crtc = new_plane_state->crtc;
9072 old_plane_crtc = old_plane_state->crtc;
9073 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9074 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9076 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9077 if (!enable || !new_plane_crtc ||
9078 drm_atomic_plane_disabling(plane->state, new_plane_state))
9081 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9083 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9084 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9088 if (new_plane_state->fb) {
9089 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9090 new_plane_state->fb);
9098 needs_reset = should_reset_plane(state, plane, old_plane_state,
9099 new_plane_state);
9101 /* Remove any changed/removed planes */
9102 if (!enable) {
9103 if (!needs_reset)
9104 return 0;
9106 if (!old_plane_crtc)
9107 return 0;
9109 old_crtc_state = drm_atomic_get_old_crtc_state(
9110 state, old_plane_crtc);
9111 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9113 if (!dm_old_crtc_state->stream)
9116 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9117 plane->base.id, old_plane_crtc->base.id);
9119 ret = dm_atomic_get_state(state, &dm_state);
9123 if (!dc_remove_plane_from_context(
9124 dc,
9125 dm_old_crtc_state->stream,
9126 dm_old_plane_state->dc_state,
9127 dm_state->context)) {
9129 return -EINVAL;
9130 }
9133 dc_plane_state_release(dm_old_plane_state->dc_state);
9134 dm_new_plane_state->dc_state = NULL;
9136 *lock_and_validation_needed = true;
9138 } else { /* Add new planes */
9139 struct dc_plane_state *dc_new_plane_state;
9141 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9144 if (!new_plane_crtc)
9147 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9148 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9150 if (!dm_new_crtc_state->stream)
9156 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9160 WARN_ON(dm_new_plane_state->dc_state);
9162 dc_new_plane_state = dc_create_plane_state(dc);
9163 if (!dc_new_plane_state)
9166 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9167 plane->base.id, new_plane_crtc->base.id);
9169 ret = fill_dc_plane_attributes(
9170 drm_to_adev(new_plane_crtc->dev),
9171 dc_new_plane_state,
9172 new_plane_state,
9173 new_crtc_state);
9174 if (ret) {
9175 dc_plane_state_release(dc_new_plane_state);
9176 return ret;
9177 }
9179 ret = dm_atomic_get_state(state, &dm_state);
9180 if (ret) {
9181 dc_plane_state_release(dc_new_plane_state);
9182 return ret;
9183 }
9185 /*
9186 * Any atomic check errors that occur after this will
9187 * not need a release. The plane state will be attached
9188 * to the stream, and therefore part of the atomic
9189 * state. It'll be released when the atomic state is
9190 * cleaned.
9191 */
9192 if (!dc_add_plane_to_context(
9193 dc,
9194 dm_new_crtc_state->stream,
9195 dc_new_plane_state,
9196 dm_state->context)) {
9198 dc_plane_state_release(dc_new_plane_state);
9199 return -EINVAL;
9200 }
9202 dm_new_plane_state->dc_state = dc_new_plane_state;
9204 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9206 /* Tell DC to do a full surface update every time there
9207 * is a plane change. Inefficient, but works for now.
9208 */
9209 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9211 *lock_and_validation_needed = true;
9218 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9219 int *src_w, int *src_h)
9221 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9222 case DRM_MODE_ROTATE_90:
9223 case DRM_MODE_ROTATE_270:
9224 *src_w = plane_state->src_h >> 16;
9225 *src_h = plane_state->src_w >> 16;
9226 break;
9227 case DRM_MODE_ROTATE_0:
9228 case DRM_MODE_ROTATE_180:
9229 default:
9230 *src_w = plane_state->src_w >> 16;
9231 *src_h = plane_state->src_h >> 16;
9232 break;
9233 }
9234 }
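/*
 * DRM plane src coordinates are 16.16 fixed point, so shifting by 16
 * yields whole pixels; width and height swap for 90/270 rotation.
 */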
9236 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9237 struct drm_crtc *crtc,
9238 struct drm_crtc_state *new_crtc_state)
9240 struct drm_plane *cursor = crtc->cursor, *underlying;
9241 struct drm_plane_state *new_cursor_state, *new_underlying_state;
9243 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9244 int cursor_src_w, cursor_src_h;
9245 int underlying_src_w, underlying_src_h;
9247 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9248 * cursor per pipe but it's going to inherit the scaling and
9249 * positioning from the underlying pipe. Check the cursor plane's
9250 * blending properties match the underlying planes'. */
9252 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
9253 if (!new_cursor_state || !new_cursor_state->fb) {
9254 return 0;
9255 }
9257 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
9258 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
9259 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
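/*
 * Scale factors are compared in 1/1000 units (crtc size * 1000 / src
 * size) so the cursor and the underlying plane round identically.
 */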
9261 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9262 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
9263 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9266 /* Ignore disabled planes */
9267 if (!new_underlying_state->fb)
9270 dm_get_oriented_plane_size(new_underlying_state,
9271 &underlying_src_w, &underlying_src_h);
9272 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
9273 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
9275 if (cursor_scale_w != underlying_scale_w ||
9276 cursor_scale_h != underlying_scale_h) {
9277 drm_dbg_atomic(crtc->dev,
9278 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9279 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9280 return -EINVAL;
9281 }
9283 /* If this plane covers the whole CRTC, no need to check planes underneath */
9284 if (new_underlying_state->crtc_x <= 0 &&
9285 new_underlying_state->crtc_y <= 0 &&
9286 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9287 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9288 break;
9289 }
9291 return 0;
9292 }
9294 #if defined(CONFIG_DRM_AMD_DC_DCN)
9295 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9297 struct drm_connector *connector;
9298 struct drm_connector_state *conn_state, *old_conn_state;
9299 struct amdgpu_dm_connector *aconnector = NULL;
9301 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9302 if (!conn_state->crtc)
9303 conn_state = old_conn_state;
9305 if (conn_state->crtc != crtc)
9308 aconnector = to_amdgpu_dm_connector(connector);
9309 if (!aconnector->port || !aconnector->mst_port)
9310 aconnector = NULL;
9311 else
9312 break;
9313 }
9315 if (!aconnector)
9316 return 0;
9318 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9322 /**
9323 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9324 * @dev: The DRM device
9325 * @state: The atomic state to commit
9327 * Validate that the given atomic state is programmable by DC into hardware.
9328 * This involves constructing a &struct dc_state reflecting the new hardware
9329 * state we wish to commit, then querying DC to see if it is programmable. It's
9330 * important not to modify the existing DC state. Otherwise, atomic_check
9331 * may unexpectedly commit hardware changes.
9333 * When validating the DC state, it's important that the right locks are
9334 * acquired. For full updates case which removes/adds/updates streams on one
9335 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9336 * that any such full update commit will wait for completion of any outstanding
9337 * flip using DRMs synchronization events.
9339 * Note that DM adds the affected connectors for all CRTCs in state, when that
9340 * might not seem necessary. This is because DC stream creation requires the
9341 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9342 * be possible but non-trivial - a possible TODO item.
9344 * Return: 0 on success, or a negative error code if validation failed.
9345 */
9346 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9347 struct drm_atomic_state *state)
9349 struct amdgpu_device *adev = drm_to_adev(dev);
9350 struct dm_atomic_state *dm_state = NULL;
9351 struct dc *dc = adev->dm.dc;
9352 struct drm_connector *connector;
9353 struct drm_connector_state *old_con_state, *new_con_state;
9354 struct drm_crtc *crtc;
9355 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9356 struct drm_plane *plane;
9357 struct drm_plane_state *old_plane_state, *new_plane_state;
9358 enum dc_status status;
9359 int ret, i;
9360 bool lock_and_validation_needed = false;
9361 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9362 #if defined(CONFIG_DRM_AMD_DC_DCN)
9363 struct dsc_mst_fairness_vars vars[MAX_PIPES];
9364 struct drm_dp_mst_topology_state *mst_state;
9365 struct drm_dp_mst_topology_mgr *mgr;
9366 #endif
9368 trace_amdgpu_dm_atomic_check_begin(state);
9370 ret = drm_atomic_helper_check_modeset(dev, state);
9372 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
9376 /* Check connector changes */
9377 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9378 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9379 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9381 /* Skip connectors that are disabled or part of modeset already. */
9382 if (!old_con_state->crtc && !new_con_state->crtc)
9385 if (!new_con_state->crtc)
9388 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9389 if (IS_ERR(new_crtc_state)) {
9390 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
9391 ret = PTR_ERR(new_crtc_state);
9395 if (dm_old_con_state->abm_level !=
9396 dm_new_con_state->abm_level)
9397 new_crtc_state->connectors_changed = true;
9400 #if defined(CONFIG_DRM_AMD_DC_DCN)
9401 if (dc_resource_is_dsc_encoding_supported(dc)) {
9402 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9403 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9404 ret = add_affected_mst_dsc_crtcs(state, crtc);
9406 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
9411 if (!pre_validate_dsc(state, &dm_state, vars)) {
9412 ret = -EINVAL;
9413 goto fail;
9414 }
9415 }
9416 #endif
9417 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9418 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9420 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9421 !new_crtc_state->color_mgmt_changed &&
9422 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9423 dm_old_crtc_state->dsc_force_changed == false)
9426 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
9428 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
9432 if (!new_crtc_state->enable)
9435 ret = drm_atomic_add_affected_connectors(state, crtc);
9437 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
9441 ret = drm_atomic_add_affected_planes(state, crtc);
9443 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
9447 if (dm_old_crtc_state->dsc_force_changed)
9448 new_crtc_state->mode_changed = true;
9451 /*
9452 * Add all primary and overlay planes on the CRTC to the state
9453 * whenever a plane is enabled to maintain correct z-ordering
9454 * and to enable fast surface updates.
9455 */
9456 drm_for_each_crtc(crtc, dev) {
9457 bool modified = false;
9459 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9460 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9463 if (new_plane_state->crtc == crtc ||
9464 old_plane_state->crtc == crtc) {
9465 modified = true;
9466 break;
9467 }
9468 }
9470 if (!modified)
9471 continue;
9473 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9474 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9475 continue;
9477 new_plane_state =
9478 drm_atomic_get_plane_state(state, plane);
9480 if (IS_ERR(new_plane_state)) {
9481 ret = PTR_ERR(new_plane_state);
9482 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
9489 /* Remove existing planes if they are modified */
9489 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9490 ret = dm_update_plane_state(dc, state, plane,
9491 old_plane_state,
9492 new_plane_state,
9493 false,
9494 &lock_and_validation_needed);
9496 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9501 /* Disable all crtcs which require disable */
9502 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9503 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9504 old_crtc_state,
9505 new_crtc_state,
9506 false,
9507 &lock_and_validation_needed);
9509 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
9514 /* Enable all crtcs which require enable */
9515 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9516 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9517 old_crtc_state,
9518 new_crtc_state,
9519 true,
9520 &lock_and_validation_needed);
9522 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
9527 /* Add new/modified planes */
9528 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9529 ret = dm_update_plane_state(dc, state, plane,
9530 old_plane_state,
9531 new_plane_state,
9532 true,
9533 &lock_and_validation_needed);
9535 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9540 /* Run this here since we want to validate the streams we created */
9541 ret = drm_atomic_helper_check_planes(dev, state);
9543 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
9547 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9548 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9549 if (dm_new_crtc_state->mpo_requested)
9550 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
9553 /* Check cursor planes scaling */
9554 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9555 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9557 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
9562 if (state->legacy_cursor_update) {
9563 /*
9564 * This is a fast cursor update coming from the plane update
9565 * helper; check if it can be done asynchronously for better
9566 * performance.
9567 */
9568 state->async_update =
9569 !drm_atomic_helper_async_check(dev, state);
9571 /*
9572 * Skip the remaining global validation if this is an async
9573 * update. Cursor updates can be done without affecting
9574 * state or bandwidth calcs and this avoids the performance
9575 * penalty of locking the private state object and
9576 * allocating a new dc_state.
9577 */
9578 if (state->async_update)
9582 /* Check scaling and underscan changes */
9583 /* TODO: Removed scaling changes validation due to inability to commit
9584 * a new stream into the context without causing a full reset. Need to
9585 * decide how to handle.
9586 */
9587 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9588 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9589 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9590 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9592 /* Skip any modesets/resets */
9593 if (!acrtc || drm_atomic_crtc_needs_modeset(
9594 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9595 continue;
9597 /* Skip anything that is not a scaling or underscan change */
9598 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9599 continue;
9601 lock_and_validation_needed = true;
9604 #if defined(CONFIG_DRM_AMD_DC_DCN)
9605 /* set the slot info for each mst_state based on the link encoding format */
9606 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
9607 struct amdgpu_dm_connector *aconnector;
9608 struct drm_connector *connector;
9609 struct drm_connector_list_iter iter;
9610 u8 link_coding_cap;
9612 if (!mgr->mst_state)
9613 continue;
9615 drm_connector_list_iter_begin(dev, &iter);
9616 drm_for_each_connector_iter(connector, &iter) {
9617 int id = connector->index;
9619 if (id == mst_state->mgr->conn_base_id) {
9620 aconnector = to_amdgpu_dm_connector(connector);
9621 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
9622 drm_dp_mst_update_slots(mst_state, link_coding_cap);
9623 break;
9624 }
9625 }
9627 drm_connector_list_iter_end(&iter);
9628 }
9629 #endif
9631 /*
9632 * Streams and planes are reset when there are changes that affect
9633 * bandwidth. Anything that affects bandwidth needs to go through
9634 * DC global validation to ensure that the configuration can be applied
9635 * to hardware.
9636 *
9637 * We have to currently stall out here in atomic_check for outstanding
9638 * commits to finish in this case because our IRQ handlers reference
9639 * DRM state directly - we can end up disabling interrupts too early
9640 * if we don't.
9641 *
9642 * TODO: Remove this stall and drop DM state private objects.
9643 */
9644 if (lock_and_validation_needed) {
9645 ret = dm_atomic_get_state(state, &dm_state);
9647 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
9651 ret = do_aquire_global_lock(dev, state);
9653 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
9657 #if defined(CONFIG_DRM_AMD_DC_DCN)
9658 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
9659 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
9664 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
9666 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
9671 /*
9672 * Perform validation of MST topology in the state:
9673 * We need to perform MST atomic check before calling
9674 * dc_validate_global_state(), or there is a chance
9675 * to get stuck in an infinite loop and hang eventually.
9676 */
9677 ret = drm_dp_mst_atomic_check(state);
9679 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
9682 status = dc_validate_global_state(dc, dm_state->context, true);
9683 if (status != DC_OK) {
9684 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
9685 dc_status_to_str(status), status);
9686 ret = -EINVAL;
9687 goto fail;
9688 }
9689 } else {
9690 /*
9691 * The commit is a fast update. Fast updates shouldn't change
9692 * the DC context, affect global validation, and can have their
9693 * commit work done in parallel with other commits not touching
9694 * the same resource. If we have a new DC context as part of
9695 * the DM atomic state from validation we need to free it and
9696 * retain the existing one instead.
9698 * Furthermore, since the DM atomic state only contains the DC
9699 * context and can safely be annulled, we can free the state
9700 * and clear the associated private object now to free
9701 * some memory and avoid a possible use-after-free later.
9702 */
9704 for (i = 0; i < state->num_private_objs; i++) {
9705 struct drm_private_obj *obj = state->private_objs[i].ptr;
9707 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9708 int j = state->num_private_objs-1;
9710 dm_atomic_destroy_state(obj,
9711 state->private_objs[i].state);
9713 /* If i is not at the end of the array then the
9714 * last element needs to be moved to where i was
9715 * before the array can safely be truncated.
9716 */
9717 if (j != i)
9718 state->private_objs[i] =
9719 state->private_objs[j];
9721 state->private_objs[j].ptr = NULL;
9722 state->private_objs[j].state = NULL;
9723 state->private_objs[j].old_state = NULL;
9724 state->private_objs[j].new_state = NULL;
9726 state->num_private_objs = j;
9732 /* Store the overall update type for use later in atomic check. */
9733 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9734 struct dm_crtc_state *dm_new_crtc_state =
9735 to_dm_crtc_state(new_crtc_state);
9737 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9738 UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
9739 }
9742 /* Must be success */
9743 WARN_ON(ret);
9745 trace_amdgpu_dm_atomic_check_finish(state, ret);
9747 return ret;
9749 fail:
9750 if (ret == -EDEADLK)
9751 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9752 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9753 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9755 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9757 trace_amdgpu_dm_atomic_check_finish(state, ret);
9759 return ret;
9760 }
9762 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9763 struct amdgpu_dm_connector *amdgpu_dm_connector)
9764 {
9765 uint8_t dpcd_data;
9766 bool capable = false;
9768 if (amdgpu_dm_connector->dc_link &&
9769 dm_helpers_dp_read_dpcd(
9770 NULL,
9771 amdgpu_dm_connector->dc_link,
9772 DP_DOWN_STREAM_PORT_COUNT,
9773 &dpcd_data,
9774 sizeof(dpcd_data))) {
9775 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9776 }
9778 return capable;
9779 }
9781 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
9782 unsigned int offset,
9783 unsigned int total_length,
9784 uint8_t *data,
9785 unsigned int length,
9786 struct amdgpu_hdmi_vsdb_info *vsdb)
9789 union dmub_rb_cmd cmd;
9790 struct dmub_cmd_send_edid_cea *input;
9791 struct dmub_cmd_edid_cea_output *output;
9793 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
9794 return false;
9796 memset(&cmd, 0, sizeof(cmd));
9798 input = &cmd.edid_cea.data.input;
9800 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
9801 cmd.edid_cea.header.sub_type = 0;
9802 cmd.edid_cea.header.payload_bytes =
9803 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
9804 input->offset = offset;
9805 input->length = length;
9806 input->cea_total_length = total_length;
9807 memcpy(input->payload, data, length);
9809 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
9811 DRM_ERROR("EDID CEA parser failed\n");
9815 output = &cmd.edid_cea.data.output;
9817 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
9818 if (!output->ack.success) {
9819 DRM_ERROR("EDID CEA ack failed at offset %d\n",
9820 output->ack.offset);
9822 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
9823 if (!output->amd_vsdb.vsdb_found)
9824 return false;
9826 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
9827 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
9828 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
9829 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
9831 DRM_WARN("Unknown EDID CEA parser results\n");
9838 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
9839 uint8_t *edid_ext, int len,
9840 struct amdgpu_hdmi_vsdb_info *vsdb_info)
9844 /* send extension block to DMCU for parsing */
9845 for (i = 0; i < len; i += 8) {
9849 /* send 8 bytes at a time */
9850 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
9851 return false;
9852 }
9854 /* EDID block transfer completed, now expect the parsed result */
9855 int version, min_rate, max_rate;
9857 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
9859 /* amd vsdb found */
9860 vsdb_info->freesync_supported = 1;
9861 vsdb_info->amd_vsdb_version = version;
9862 vsdb_info->min_refresh_rate_hz = min_rate;
9863 vsdb_info->max_refresh_rate_hz = max_rate;
9864 return true;
9865 }
9871 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
9879 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
9880 uint8_t *edid_ext, int len,
9881 struct amdgpu_hdmi_vsdb_info *vsdb_info)
9885 /* send extension block to DMUB for parsing */
9886 for (i = 0; i < len; i += 8) {
9887 /* send 8 bytes at a time */
9888 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
9889 return false;
9890 }
9892 return vsdb_info->freesync_supported;
9895 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
9896 uint8_t *edid_ext, int len,
9897 struct amdgpu_hdmi_vsdb_info *vsdb_info)
9899 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
9901 if (adev->dm.dmub_srv)
9902 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
9904 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
9907 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
9908 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
9910 uint8_t *edid_ext = NULL;
9912 bool valid_vsdb_found = false;
9914 /*----- drm_find_cea_extension() -----*/
9915 /* No EDID or EDID extensions */
9916 if (edid == NULL || edid->extensions == 0)
9917 return -ENODEV;
9919 /* Find CEA extension */
9920 for (i = 0; i < edid->extensions; i++) {
9921 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
9922 if (edid_ext[0] == CEA_EXT)
9923 break;
9924 }
9926 if (i == edid->extensions)
9927 return -ENODEV;
9929 /*----- cea_db_offsets() -----*/
9930 if (edid_ext[0] != CEA_EXT)
9931 return -ENODEV;
9933 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
9935 return valid_vsdb_found ? i : -ENODEV;
9938 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9939 struct edid *edid)
9940 {
9942 struct detailed_timing *timing;
9943 struct detailed_non_pixel *data;
9944 struct detailed_data_monitor_range *range;
9945 struct amdgpu_dm_connector *amdgpu_dm_connector =
9946 to_amdgpu_dm_connector(connector);
9947 struct dm_connector_state *dm_con_state = NULL;
9948 struct dc_sink *sink;
9950 struct drm_device *dev = connector->dev;
9951 struct amdgpu_device *adev = drm_to_adev(dev);
9952 bool freesync_capable = false;
9953 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
9955 if (!connector->state) {
9956 DRM_ERROR("%s - Connector has no state", __func__);
9960 sink = amdgpu_dm_connector->dc_sink ?
9961 amdgpu_dm_connector->dc_sink :
9962 amdgpu_dm_connector->dc_em_sink;
9964 if (!edid || !sink) {
9965 dm_con_state = to_dm_connector_state(connector->state);
9967 amdgpu_dm_connector->min_vfreq = 0;
9968 amdgpu_dm_connector->max_vfreq = 0;
9969 amdgpu_dm_connector->pixel_clock_mhz = 0;
9970 connector->display_info.monitor_range.min_vfreq = 0;
9971 connector->display_info.monitor_range.max_vfreq = 0;
9972 freesync_capable = false;
9974 goto update;
9975 }
9977 dm_con_state = to_dm_connector_state(connector->state);
9979 if (!adev->dm.freesync_module)
9980 goto update;
9983 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9984 || sink->sink_signal == SIGNAL_TYPE_EDP) {
9985 bool edid_check_required = false;
9987 if (edid) {
9988 edid_check_required = is_dp_capable_without_timing_msa(
9989 adev->dm.dc,
9990 amdgpu_dm_connector);
9991 }
9993 if (edid_check_required == true && (edid->version > 1 ||
9994 (edid->version == 1 && edid->revision > 1))) {
9995 for (i = 0; i < 4; i++) {
9997 timing = &edid->detailed_timings[i];
9998 data = &timing->data.other_data;
9999 range = &data->data.range;
10000 /*
10001 * Check if monitor has continuous frequency mode
10002 */
10003 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10004 continue;
10005 /*
10006 * Check for flag range limits only. If flag == 1 then
10007 * no additional timing information is provided.
10008 * Default GTF, GTF Secondary curve and CVT are not
10009 * supported.
10010 */
10011 if (range->flags != 1)
10012 continue;
10014 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10015 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
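/* The EDID range descriptor stores the max pixel clock in 10 MHz units; scale it to MHz for the cached value. */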
10016 amdgpu_dm_connector->pixel_clock_mhz =
10017 range->pixel_clock_mhz * 10;
10019 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10020 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10022 break;
10023 }
10025 if (amdgpu_dm_connector->max_vfreq -
10026 amdgpu_dm_connector->min_vfreq > 10) {
10028 freesync_capable = true;
10029 }
10030 }
10031 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10032 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10033 if (i >= 0 && vsdb_info.freesync_supported) {
10034 timing = &edid->detailed_timings[i];
10035 data = &timing->data.other_data;
10037 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10038 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10039 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10040 freesync_capable = true;
10042 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10043 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10044 }
10045 }
10047 update:
10048 if (dm_con_state)
10049 dm_con_state->freesync_capable = freesync_capable;
10051 if (connector->vrr_capable_property)
10052 drm_connector_set_vrr_capable_property(connector,
10053 freesync_capable);
10054 }
10056 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10058 struct amdgpu_device *adev = drm_to_adev(dev);
10059 struct dc *dc = adev->dm.dc;
10062 mutex_lock(&adev->dm.dc_lock);
10063 if (dc->current_state) {
10064 for (i = 0; i < dc->current_state->stream_count; ++i)
10065 dc->current_state->streams[i]
10066 ->triggered_crtc_reset.enabled =
10067 adev->dm.force_timing_sync;
10069 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10070 dc_trigger_sync(dc, dc->current_state);
10072 mutex_unlock(&adev->dm.dc_lock);
10075 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10076 uint32_t value, const char *func_name)
10077 {
10078 #ifdef DM_CHECK_ADDR_0
10079 if (address == 0) {
10080 DC_ERR("invalid register write. address = 0");
10084 cgs_write_register(ctx->cgs_device, address, value);
10085 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10088 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10089 const char *func_name)
10090 {
10091 uint32_t value;
10092 #ifdef DM_CHECK_ADDR_0
10093 if (address == 0) {
10094 DC_ERR("invalid register read; address = 0\n");
10099 if (ctx->dmub_srv &&
10100 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10101 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10102 ASSERT(false);
10103 return 0;
10104 }
10106 value = cgs_read_register(ctx->cgs_device, address);
10108 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10110 return value;
10111 }
10113 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
10114 struct dc_context *ctx,
10115 uint8_t status_type,
10116 uint32_t *operation_result)
10117 {
10118 struct amdgpu_device *adev = ctx->driver_context;
10119 int return_status = -1;
10120 struct dmub_notification *p_notify = adev->dm.dmub_notify;
10122 if (is_cmd_aux) {
10123 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10124 return_status = p_notify->aux_reply.length;
10125 *operation_result = p_notify->result;
10126 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
10127 *operation_result = AUX_RET_ERROR_TIMEOUT;
10128 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
10129 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
10131 *operation_result = AUX_RET_ERROR_UNKNOWN;
10132 }
10133 } else {
10134 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10135 return_status = 0;
10136 *operation_result = p_notify->sc_status;
10137 } else {
10138 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
10139 }
10140 }
10142 return return_status;
10145 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
10146 unsigned int link_index, void *cmd_payload, void *operation_result)
10148 struct amdgpu_device *adev = ctx->driver_context;
10152 dc_process_dmub_aux_transfer_async(ctx->dc,
10153 link_index, (struct aux_payload *)cmd_payload);
10154 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
10155 (struct set_config_cmd_payload *)cmd_payload,
10156 adev->dm.dmub_notify)) {
10157 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10158 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
10159 (uint32_t *)operation_result);
10162 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
10164 DRM_ERROR("wait_for_completion_timeout timeout!");
10165 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10166 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
10167 (uint32_t *)operation_result);
10168 }
10170 if (is_cmd_aux) {
10171 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10172 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
10174 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
10175 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10176 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
10177 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10178 adev->dm.dmub_notify->aux_reply.length);
10179 }
10180 }
10181 }
10183 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10184 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
10185 (uint32_t *)operation_result);
10186 }
10188 /*
10189 * Check whether seamless boot is supported.
10191 * So far we only support seamless boot on CHIP_VANGOGH.
10192 * If everything goes well, we may consider expanding
10193 * seamless boot to other ASICs.
10194 */
10195 bool check_seamless_boot_capability(struct amdgpu_device *adev)
10196 {
10197 switch (adev->asic_type) {
10198 case CHIP_VANGOGH:
10199 if (!adev->mman.keep_stolen_vga_memory)