2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
48 #include "amdgpu_pm.h"
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
58 #include "ivsrcid/ivsrcid_vislands30.h"
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
104 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
107 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
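/* Both are stripped from hdr->inst_const_bytes when the DMUB inst-const
 * region is sized and copied; see dm_dmub_hw_init() and dm_dmub_sw_init().
 */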
119 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
121 * requests into DC requests, and DC responses into DRM responses.
123 * The root control structure is &struct amdgpu_display_manager.
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
130 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
132 switch (link->dpcd_caps.dongle_type) {
133 case DISPLAY_DONGLE_NONE:
134 return DRM_MODE_SUBCONNECTOR_Native;
135 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
136 return DRM_MODE_SUBCONNECTOR_VGA;
137 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
138 case DISPLAY_DONGLE_DP_DVI_DONGLE:
139 return DRM_MODE_SUBCONNECTOR_DVID;
140 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
141 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
142 return DRM_MODE_SUBCONNECTOR_HDMIA;
143 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
144 default:
145 return DRM_MODE_SUBCONNECTOR_Unknown;
149 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
151 struct dc_link *link = aconnector->dc_link;
152 struct drm_connector *connector = &aconnector->base;
153 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
155 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
158 if (aconnector->dc_sink)
159 subconnector = get_subconnector_type(link);
161 drm_object_property_set_value(&connector->base,
162 connector->dev->mode_config.dp_subconnector_property,
167 * initializes drm_device display related structures, based on the information
168 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
169 * drm_encoder, drm_mode_config
171 * Returns 0 on success
173 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
174 /* removes and deallocates the drm structures, created by the above function */
175 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
177 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
178 struct drm_plane *plane,
179 unsigned long possible_crtcs,
180 const struct dc_plane_cap *plane_cap);
181 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
182 struct drm_plane *plane,
183 uint32_t link_index);
184 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
185 struct amdgpu_dm_connector *amdgpu_dm_connector,
187 struct amdgpu_encoder *amdgpu_encoder);
188 static int amdgpu_dm_encoder_init(struct drm_device *dev,
189 struct amdgpu_encoder *aencoder,
190 uint32_t link_index);
192 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
194 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
195 struct drm_atomic_state *state,
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 struct drm_atomic_state *state);
203 static void handle_cursor_update(struct drm_plane *plane,
204 struct drm_plane_state *old_plane_state);
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
213 * dm_vblank_get_counter
216 * Get counter for number of vertical blanks
219 * struct amdgpu_device *adev - [in] desired amdgpu device
220 * int crtc - [in] which CRTC to get the counter from
223 * Counter for vertical blanks
225 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
227 if (crtc >= adev->mode_info.num_crtc)
230 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
232 if (acrtc->dm_irq_params.stream == NULL) {
233 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
238 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
242 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
243 u32 *vbl, u32 *position)
245 uint32_t v_blank_start, v_blank_end, h_position, v_position;
247 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
250 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
252 if (acrtc->dm_irq_params.stream == NULL) {
253 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
259 * TODO rework base driver to use values directly.
260 * for now parse it back into reg-format
262 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
268 *position = v_position | (h_position << 16);
269 *vbl = v_blank_start | (v_blank_end << 16);
275 static bool dm_is_idle(void *handle)
281 static int dm_wait_for_idle(void *handle)
287 static bool dm_check_soft_reset(void *handle)
292 static int dm_soft_reset(void *handle)
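/* Map an OTG instance back to the amdgpu_crtc that drives it; an otg_inst
 * of -1 falls back to the first CRTC.
 */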
298 static struct amdgpu_crtc *
299 get_crtc_by_otg_inst(struct amdgpu_device *adev,
302 struct drm_device *dev = adev_to_drm(adev);
303 struct drm_crtc *crtc;
304 struct amdgpu_crtc *amdgpu_crtc;
306 if (otg_inst == -1) {
308 return adev->mode_info.crtcs[0];
311 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
312 amdgpu_crtc = to_amdgpu_crtc(crtc);
314 if (amdgpu_crtc->otg_inst == otg_inst)
321 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
323 return acrtc->dm_irq_params.freesync_config.state ==
324 VRR_STATE_ACTIVE_VARIABLE ||
325 acrtc->dm_irq_params.freesync_config.state ==
326 VRR_STATE_ACTIVE_FIXED;
329 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
331 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
332 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
336 * dm_pflip_high_irq() - Handle pageflip interrupt
337 * @interrupt_params: ignored
339 * Handles the pageflip interrupt by notifying all interested parties
340 * that the pageflip has been completed.
342 static void dm_pflip_high_irq(void *interrupt_params)
344 struct amdgpu_crtc *amdgpu_crtc;
345 struct common_irq_params *irq_params = interrupt_params;
346 struct amdgpu_device *adev = irq_params->adev;
348 struct drm_pending_vblank_event *e;
349 uint32_t vpos, hpos, v_blank_start, v_blank_end;
352 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
354 /* IRQ could occur when in initial stage */
355 /* TODO work and BO cleanup */
356 if (amdgpu_crtc == NULL) {
357 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
361 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
363 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
364 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
365 amdgpu_crtc->pflip_status,
366 AMDGPU_FLIP_SUBMITTED,
367 amdgpu_crtc->crtc_id,
369 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
373 /* page flip completed. */
374 e = amdgpu_crtc->event;
375 amdgpu_crtc->event = NULL;
380 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
382 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
384 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
385 &v_blank_end, &hpos, &vpos) ||
386 (vpos < v_blank_start)) {
387 /* Update to correct count and vblank timestamp if racing with
388 * vblank irq. This also updates to the correct vblank timestamp
389 * even in VRR mode, as scanout is past the front-porch atm.
391 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
393 /* Wake up userspace by sending the pageflip event with proper
394 * count and timestamp of vblank of flip completion.
397 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
399 /* Event sent, so done with vblank for this flip */
400 drm_crtc_vblank_put(&amdgpu_crtc->base);
403 /* VRR active and inside front-porch: vblank count and
404 * timestamp for pageflip event will only be up to date after
405 * drm_crtc_handle_vblank() has been executed from late vblank
406 * irq handler after start of back-porch (vline 0). We queue the
407 * pageflip event for send-out by drm_crtc_handle_vblank() with
408 * updated timestamp and count, once it runs after us.
410 * We need to open-code this instead of using the helper
411 * drm_crtc_arm_vblank_event(), as that helper would
412 * call drm_crtc_accurate_vblank_count(), which we must
413 * not call in VRR mode while we are in front-porch!
416 /* sequence will be replaced by real count during send-out. */
417 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
418 e->pipe = amdgpu_crtc->crtc_id;
420 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
424 /* Keep track of the vblank of this flip for flip throttling. We use the
425 * cooked hw counter, as that one is incremented at the start of the vblank
426 * in which the pageflip completed, so last_flip_vblank is the forbidden
427 * count for queueing new pageflips if vsync + VRR is enabled.
429 amdgpu_crtc->dm_irq_params.last_flip_vblank =
430 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
432 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
433 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
435 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
436 amdgpu_crtc->crtc_id, amdgpu_crtc,
437 vrr_active, (int) !e);
440 static void dm_vupdate_high_irq(void *interrupt_params)
442 struct common_irq_params *irq_params = interrupt_params;
443 struct amdgpu_device *adev = irq_params->adev;
444 struct amdgpu_crtc *acrtc;
448 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
451 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
453 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
457 /* Core vblank handling is done here after the end of front-porch in
458 * vrr mode, as vblank timestamping will only give valid results
459 * now that we are past front-porch. This will also deliver
460 * page-flip completion events that have been queued to us
461 * if a pageflip happened inside front-porch.
464 drm_crtc_handle_vblank(&acrtc->base);
466 /* BTR processing for pre-DCE12 ASICs */
467 if (acrtc->dm_irq_params.stream &&
468 adev->family < AMDGPU_FAMILY_AI) {
469 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
470 mod_freesync_handle_v_update(
471 adev->dm.freesync_module,
472 acrtc->dm_irq_params.stream,
473 &acrtc->dm_irq_params.vrr_params);
475 dc_stream_adjust_vmin_vmax(
477 acrtc->dm_irq_params.stream,
478 &acrtc->dm_irq_params.vrr_params.adjust);
479 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
486 * dm_crtc_high_irq() - Handles CRTC interrupt
487 * @interrupt_params: used for determining the CRTC instance
489 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
492 static void dm_crtc_high_irq(void *interrupt_params)
494 struct common_irq_params *irq_params = interrupt_params;
495 struct amdgpu_device *adev = irq_params->adev;
496 struct amdgpu_crtc *acrtc;
500 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
504 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
506 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
507 vrr_active, acrtc->dm_irq_params.active_planes);
510 * Core vblank handling at start of front-porch is only possible
511 * in non-vrr mode, as only then does vblank timestamping give
512 * valid results while inside front-porch. Otherwise defer it
513 * to dm_vupdate_high_irq after the end of front-porch.
516 drm_crtc_handle_vblank(&acrtc->base);
519 * Following stuff must happen at start of vblank, for crc
520 * computation and below-the-range btr support in vrr mode.
522 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
524 /* BTR updates need to happen before VUPDATE on Vega and above. */
525 if (adev->family < AMDGPU_FAMILY_AI)
528 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
530 if (acrtc->dm_irq_params.stream &&
531 acrtc->dm_irq_params.vrr_params.supported &&
532 acrtc->dm_irq_params.freesync_config.state ==
533 VRR_STATE_ACTIVE_VARIABLE) {
534 mod_freesync_handle_v_update(adev->dm.freesync_module,
535 acrtc->dm_irq_params.stream,
536 &acrtc->dm_irq_params.vrr_params);
538 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
539 &acrtc->dm_irq_params.vrr_params.adjust);
543 * If there aren't any active_planes then DCH HUBP may be clock-gated.
544 * In that case, pageflip completion interrupts won't fire and pageflip
545 * completion events won't get delivered. Prevent this by sending
546 * pending pageflip events from here if a flip is still pending.
548 * If any planes are enabled, use dm_pflip_high_irq() instead, to
549 * avoid race conditions between flip programming and completion,
550 * which could cause too early flip completion events.
552 if (adev->family >= AMDGPU_FAMILY_RV &&
553 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
554 acrtc->dm_irq_params.active_planes == 0) {
556 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
558 drm_crtc_vblank_put(&acrtc->base);
560 acrtc->pflip_status = AMDGPU_FLIP_NONE;
563 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
566 static int dm_set_clockgating_state(void *handle,
567 enum amd_clockgating_state state)
572 static int dm_set_powergating_state(void *handle,
573 enum amd_powergating_state state)
578 /* Prototypes of private functions */
579 static int dm_early_init(void* handle);
581 /* Allocate memory for FBC compressed data */
582 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
584 struct drm_device *dev = connector->dev;
585 struct amdgpu_device *adev = drm_to_adev(dev);
586 struct dm_comressor_info *compressor = &adev->dm.compressor;
587 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
588 struct drm_display_mode *mode;
589 unsigned long max_size = 0;
591 if (adev->dm.dc->fbc_compressor == NULL)
594 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
597 if (compressor->bo_ptr)
601 list_for_each_entry(mode, &connector->modes, head) {
602 if (max_size < mode->htotal * mode->vtotal)
603 max_size = mode->htotal * mode->vtotal;
607 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
608 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
609 &compressor->gpu_addr, &compressor->cpu_addr);
612 DRM_ERROR("DM: Failed to initialize FBC\n");
614 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
615 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
622 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
623 int pipe, bool *enabled,
624 unsigned char *buf, int max_bytes)
626 struct drm_device *dev = dev_get_drvdata(kdev);
627 struct amdgpu_device *adev = drm_to_adev(dev);
628 struct drm_connector *connector;
629 struct drm_connector_list_iter conn_iter;
630 struct amdgpu_dm_connector *aconnector;
635 mutex_lock(&adev->dm.audio_lock);
637 drm_connector_list_iter_begin(dev, &conn_iter);
638 drm_for_each_connector_iter(connector, &conn_iter) {
639 aconnector = to_amdgpu_dm_connector(connector);
640 if (aconnector->audio_inst != port)
644 ret = drm_eld_size(connector->eld);
645 memcpy(buf, connector->eld, min(max_bytes, ret));
649 drm_connector_list_iter_end(&conn_iter);
651 mutex_unlock(&adev->dm.audio_lock);
653 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
658 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
659 .get_eld = amdgpu_dm_audio_component_get_eld,
662 static int amdgpu_dm_audio_component_bind(struct device *kdev,
663 struct device *hda_kdev, void *data)
665 struct drm_device *dev = dev_get_drvdata(kdev);
666 struct amdgpu_device *adev = drm_to_adev(dev);
667 struct drm_audio_component *acomp = data;
669 acomp->ops = &amdgpu_dm_audio_component_ops;
671 adev->dm.audio_component = acomp;
676 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
677 struct device *hda_kdev, void *data)
679 struct drm_device *dev = dev_get_drvdata(kdev);
680 struct amdgpu_device *adev = drm_to_adev(dev);
681 struct drm_audio_component *acomp = data;
685 adev->dm.audio_component = NULL;
688 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
689 .bind = amdgpu_dm_audio_component_bind,
690 .unbind = amdgpu_dm_audio_component_unbind,
693 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
700 adev->mode_info.audio.enabled = true;
702 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
704 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
705 adev->mode_info.audio.pin[i].channels = -1;
706 adev->mode_info.audio.pin[i].rate = -1;
707 adev->mode_info.audio.pin[i].bits_per_sample = -1;
708 adev->mode_info.audio.pin[i].status_bits = 0;
709 adev->mode_info.audio.pin[i].category_code = 0;
710 adev->mode_info.audio.pin[i].connected = false;
711 adev->mode_info.audio.pin[i].id =
712 adev->dm.dc->res_pool->audios[i]->inst;
713 adev->mode_info.audio.pin[i].offset = 0;
716 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
720 adev->dm.audio_registered = true;
725 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
730 if (!adev->mode_info.audio.enabled)
733 if (adev->dm.audio_registered) {
734 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
735 adev->dm.audio_registered = false;
738 /* TODO: Disable audio? */
740 adev->mode_info.audio.enabled = false;
743 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
745 struct drm_audio_component *acomp = adev->dm.audio_component;
747 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
748 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
750 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
755 static int dm_dmub_hw_init(struct amdgpu_device *adev)
757 const struct dmcub_firmware_header_v1_0 *hdr;
758 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
759 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
760 const struct firmware *dmub_fw = adev->dm.dmub_fw;
761 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
762 struct abm *abm = adev->dm.dc->res_pool->abm;
763 struct dmub_srv_hw_params hw_params;
764 enum dmub_status status;
765 const unsigned char *fw_inst_const, *fw_bss_data;
766 uint32_t i, fw_inst_const_size, fw_bss_data_size;
770 /* DMUB isn't supported on the ASIC. */
774 DRM_ERROR("No framebuffer info for DMUB service.\n");
779 /* Firmware required for DMUB support. */
780 DRM_ERROR("No firmware provided for DMUB.\n");
784 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
785 if (status != DMUB_STATUS_OK) {
786 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
790 if (!has_hw_support) {
791 DRM_INFO("DMUB unsupported on ASIC\n");
795 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
797 fw_inst_const = dmub_fw->data +
798 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
801 fw_bss_data = dmub_fw->data +
802 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
803 le32_to_cpu(hdr->inst_const_bytes);
805 /* Copy firmware and bios info into FB memory. */
806 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
807 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
809 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
811 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
812 * amdgpu_ucode_init_single_fw will load dmub firmware
813 * fw_inst_const part to cw0; otherwise, the firmware back door load
814 * will be done by dm_dmub_hw_init
816 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
817 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
821 if (fw_bss_data_size)
822 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
823 fw_bss_data, fw_bss_data_size);
825 /* Copy firmware bios info into FB memory. */
826 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
829 /* Reset regions that need to be reset. */
830 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
831 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
833 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
834 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
836 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
837 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
839 /* Initialize hardware. */
840 memset(&hw_params, 0, sizeof(hw_params));
841 hw_params.fb_base = adev->gmc.fb_start;
842 hw_params.fb_offset = adev->gmc.aper_base;
844 /* backdoor load firmware and trigger dmub running */
845 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
846 hw_params.load_inst_const = true;
849 hw_params.psp_version = dmcu->psp_version;
851 for (i = 0; i < fb_info->num_fb; ++i)
852 hw_params.fb[i] = &fb_info->fb[i];
854 status = dmub_srv_hw_init(dmub_srv, &hw_params);
855 if (status != DMUB_STATUS_OK) {
856 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
860 /* Wait for firmware load to finish. */
861 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
862 if (status != DMUB_STATUS_OK)
863 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
865 /* Init DMCU and ABM if available. */
867 dmcu->funcs->dmcu_init(dmcu);
868 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
871 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
872 if (!adev->dm.dc->ctx->dmub_srv) {
873 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
877 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
878 adev->dm.dmcub_fw_version);
883 static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
884 struct drm_atomic_state *state)
886 struct drm_connector *connector;
887 struct drm_crtc *crtc;
888 struct amdgpu_dm_connector *amdgpu_dm_connector;
889 struct drm_connector_state *conn_state;
890 struct dm_crtc_state *acrtc_state;
891 struct drm_crtc_state *crtc_state;
892 struct dc_stream_state *stream;
893 struct drm_device *dev = adev_to_drm(adev);
895 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
897 amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
898 conn_state = connector->state;
900 if (!(conn_state && conn_state->crtc))
903 crtc = conn_state->crtc;
904 acrtc_state = to_dm_crtc_state(crtc->state);
906 if (!(acrtc_state && acrtc_state->stream))
909 stream = acrtc_state->stream;
911 if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
912 amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
913 amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
914 amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
915 conn_state = drm_atomic_get_connector_state(state, connector);
916 crtc_state = drm_atomic_get_crtc_state(state, crtc);
917 crtc_state->mode_changed = true;
922 static int amdgpu_dm_init(struct amdgpu_device *adev)
924 struct dc_init_data init_data;
925 #ifdef CONFIG_DRM_AMD_DC_HDCP
926 struct dc_callback_init init_params;
930 adev->dm.ddev = adev_to_drm(adev);
931 adev->dm.adev = adev;
933 /* Zero all the fields */
934 memset(&init_data, 0, sizeof(init_data));
935 #ifdef CONFIG_DRM_AMD_DC_HDCP
936 memset(&init_params, 0, sizeof(init_params));
939 mutex_init(&adev->dm.dc_lock);
940 mutex_init(&adev->dm.audio_lock);
942 if(amdgpu_dm_irq_init(adev)) {
943 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
947 init_data.asic_id.chip_family = adev->family;
949 init_data.asic_id.pci_revision_id = adev->pdev->revision;
950 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
952 init_data.asic_id.vram_width = adev->gmc.vram_width;
953 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
954 init_data.asic_id.atombios_base_address =
955 adev->mode_info.atom_context->bios;
957 init_data.driver = adev;
959 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
961 if (!adev->dm.cgs_device) {
962 DRM_ERROR("amdgpu: failed to create cgs device.\n");
966 init_data.cgs_device = adev->dm.cgs_device;
968 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
970 switch (adev->asic_type) {
975 init_data.flags.gpu_vm_support = true;
981 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
982 init_data.flags.fbc_support = true;
984 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
985 init_data.flags.multi_mon_pp_mclk_switch = true;
987 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
988 init_data.flags.disable_fractional_pwm = true;
990 init_data.flags.power_down_display_on_boot = true;
992 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
994 /* Display Core create. */
995 adev->dm.dc = dc_create(&init_data);
998 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1000 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1004 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1005 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1006 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1009 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1010 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1012 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1013 adev->dm.dc->debug.disable_stutter = true;
1015 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1016 adev->dm.dc->debug.disable_dsc = true;
1018 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1019 adev->dm.dc->debug.disable_clock_gate = true;
1021 r = dm_dmub_hw_init(adev);
1023 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1027 dc_hardware_init(adev->dm.dc);
1029 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1030 if (!adev->dm.freesync_module) {
1032 "amdgpu: failed to initialize freesync_module.\n");
1034 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1035 adev->dm.freesync_module);
1037 amdgpu_dm_init_color_mod();
1039 #ifdef CONFIG_DRM_AMD_DC_HDCP
1040 if (adev->asic_type >= CHIP_RAVEN) {
1041 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1043 if (!adev->dm.hdcp_workqueue)
1044 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1046 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1048 dc_init_callbacks(adev->dm.dc, &init_params);
1051 if (amdgpu_dm_initialize_drm_device(adev)) {
1053 "amdgpu: failed to initialize sw for display support.\n");
1057 /* Update the actual number of CRTCs in use */
1058 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1060 /* create fake encoders for MST */
1061 dm_dp_create_fake_mst_encoders(adev);
1063 /* TODO: Add_display_info? */
1065 /* TODO use dynamic cursor width */
1066 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1067 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1069 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1071 "amdgpu: failed to initialize sw for display support.\n");
1075 DRM_DEBUG_DRIVER("KMS initialized.\n");
1079 amdgpu_dm_fini(adev);
1084 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1088 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1089 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1092 amdgpu_dm_audio_fini(adev);
1094 amdgpu_dm_destroy_drm_device(&adev->dm);
1096 #ifdef CONFIG_DRM_AMD_DC_HDCP
1097 if (adev->dm.hdcp_workqueue) {
1098 hdcp_destroy(adev->dm.hdcp_workqueue);
1099 adev->dm.hdcp_workqueue = NULL;
1103 dc_deinit_callbacks(adev->dm.dc);
1105 if (adev->dm.dc->ctx->dmub_srv) {
1106 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1107 adev->dm.dc->ctx->dmub_srv = NULL;
1110 if (adev->dm.dmub_bo)
1111 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1112 &adev->dm.dmub_bo_gpu_addr,
1113 &adev->dm.dmub_bo_cpu_addr);
1115 /* DC Destroy TODO: Replace destroy DAL */
1117 dc_destroy(&adev->dm.dc);
1119 * TODO: pageflip, vblank interrupt
1121 * amdgpu_dm_irq_fini(adev);
1124 if (adev->dm.cgs_device) {
1125 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1126 adev->dm.cgs_device = NULL;
1128 if (adev->dm.freesync_module) {
1129 mod_freesync_destroy(adev->dm.freesync_module);
1130 adev->dm.freesync_module = NULL;
1133 mutex_destroy(&adev->dm.audio_lock);
1134 mutex_destroy(&adev->dm.dc_lock);
1139 static int load_dmcu_fw(struct amdgpu_device *adev)
1141 const char *fw_name_dmcu = NULL;
1143 const struct dmcu_firmware_header_v1_0 *hdr;
1145 switch(adev->asic_type) {
1146 #if defined(CONFIG_DRM_AMD_DC_SI)
1161 case CHIP_POLARIS11:
1162 case CHIP_POLARIS10:
1163 case CHIP_POLARIS12:
1171 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1172 case CHIP_SIENNA_CICHLID:
1173 case CHIP_NAVY_FLOUNDER:
1177 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1180 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1181 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1182 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1183 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1188 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1192 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1193 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1197 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1199 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1200 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1201 adev->dm.fw_dmcu = NULL;
1205 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1210 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1212 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1214 release_firmware(adev->dm.fw_dmcu);
1215 adev->dm.fw_dmcu = NULL;
1219 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1220 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1221 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1222 adev->firmware.fw_size +=
1223 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1225 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1226 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1227 adev->firmware.fw_size +=
1228 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1230 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1232 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1237 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1239 struct amdgpu_device *adev = ctx;
1241 return dm_read_reg(adev->dm.dc->ctx, address);
1244 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1247 struct amdgpu_device *adev = ctx;
1249 return dm_write_reg(adev->dm.dc->ctx, address, value);
1252 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1254 struct dmub_srv_create_params create_params;
1255 struct dmub_srv_region_params region_params;
1256 struct dmub_srv_region_info region_info;
1257 struct dmub_srv_fb_params fb_params;
1258 struct dmub_srv_fb_info *fb_info;
1259 struct dmub_srv *dmub_srv;
1260 const struct dmcub_firmware_header_v1_0 *hdr;
1261 const char *fw_name_dmub;
1262 enum dmub_asic dmub_asic;
1263 enum dmub_status status;
1266 switch (adev->asic_type) {
1268 dmub_asic = DMUB_ASIC_DCN21;
1269 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1271 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1272 case CHIP_SIENNA_CICHLID:
1273 dmub_asic = DMUB_ASIC_DCN30;
1274 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1276 case CHIP_NAVY_FLOUNDER:
1277 dmub_asic = DMUB_ASIC_DCN30;
1278 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1283 /* ASIC doesn't support DMUB. */
1287 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1289 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1293 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1295 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1299 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1301 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1302 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1303 AMDGPU_UCODE_ID_DMCUB;
1304 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1306 adev->firmware.fw_size +=
1307 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1309 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1310 adev->dm.dmcub_fw_version);
1313 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1315 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1316 dmub_srv = adev->dm.dmub_srv;
1319 DRM_ERROR("Failed to allocate DMUB service!\n");
1323 memset(&create_params, 0, sizeof(create_params));
1324 create_params.user_ctx = adev;
1325 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1326 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1327 create_params.asic = dmub_asic;
1329 /* Create the DMUB service. */
1330 status = dmub_srv_create(dmub_srv, &create_params);
1331 if (status != DMUB_STATUS_OK) {
1332 DRM_ERROR("Error creating DMUB service: %d\n", status);
1336 /* Calculate the size of all the regions for the DMUB service. */
1337 memset(&region_params, 0, sizeof(region_params));
1339 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1340 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1341 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1342 region_params.vbios_size = adev->bios_size;
1343 region_params.fw_bss_data = region_params.bss_data_size ?
1344 adev->dm.dmub_fw->data +
1345 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1346 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1347 region_params.fw_inst_const =
1348 adev->dm.dmub_fw->data +
1349 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1352 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1355 if (status != DMUB_STATUS_OK) {
1356 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1361 * Allocate a framebuffer based on the total size of all the regions.
1362 * TODO: Move this into GART.
1364 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1365 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1366 &adev->dm.dmub_bo_gpu_addr,
1367 &adev->dm.dmub_bo_cpu_addr);
1371 /* Rebase the regions on the framebuffer address. */
1372 memset(&fb_params, 0, sizeof(fb_params));
1373 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1374 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1375 fb_params.region_info = &region_info;
1377 adev->dm.dmub_fb_info =
1378 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1379 fb_info = adev->dm.dmub_fb_info;
1383 "Failed to allocate framebuffer info for DMUB service!\n");
1387 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1388 if (status != DMUB_STATUS_OK) {
1389 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1396 static int dm_sw_init(void *handle)
1398 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1401 r = dm_dmub_sw_init(adev);
1405 return load_dmcu_fw(adev);
1408 static int dm_sw_fini(void *handle)
1410 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1412 kfree(adev->dm.dmub_fb_info);
1413 adev->dm.dmub_fb_info = NULL;
1415 if (adev->dm.dmub_srv) {
1416 dmub_srv_destroy(adev->dm.dmub_srv);
1417 adev->dm.dmub_srv = NULL;
1420 release_firmware(adev->dm.dmub_fw);
1421 adev->dm.dmub_fw = NULL;
1423 release_firmware(adev->dm.fw_dmcu);
1424 adev->dm.fw_dmcu = NULL;
1429 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1431 struct amdgpu_dm_connector *aconnector;
1432 struct drm_connector *connector;
1433 struct drm_connector_list_iter iter;
1436 drm_connector_list_iter_begin(dev, &iter);
1437 drm_for_each_connector_iter(connector, &iter) {
1438 aconnector = to_amdgpu_dm_connector(connector);
1439 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1440 aconnector->mst_mgr.aux) {
1441 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1443 aconnector->base.base.id);
1445 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1447 DRM_ERROR("DM_MST: Failed to start MST\n");
1448 aconnector->dc_link->type =
1449 dc_connection_single;
1454 drm_connector_list_iter_end(&iter);
1459 static int dm_late_init(void *handle)
1461 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1463 struct dmcu_iram_parameters params;
1464 unsigned int linear_lut[16];
1466 struct dmcu *dmcu = NULL;
1469 dmcu = adev->dm.dc->res_pool->dmcu;
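/* Build a 16-entry linear (identity) backlight LUT spanning 0..0xFFFF. */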
1471 for (i = 0; i < 16; i++)
1472 linear_lut[i] = 0xFFFF * i / 15;
1475 params.backlight_ramping_start = 0xCCCC;
1476 params.backlight_ramping_reduction = 0xCCCCCCCC;
1477 params.backlight_lut_array_size = 16;
1478 params.backlight_lut_array = linear_lut;
1480 /* Min backlight level after ABM reduction; don't allow below 1%
1481 * 0xFFFF x 0.01 = 0x28F
1483 params.min_abm_backlight = 0x28F;
1485 /* In the case where abm is implemented on dmcub,
1486 * dmcu object will be null.
1487 * ABM 2.4 and up are implemented on dmcub.
1490 ret = dmcu_load_iram(dmcu, params);
1491 else if (adev->dm.dc->ctx->dmub_srv)
1492 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1497 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1500 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1502 struct amdgpu_dm_connector *aconnector;
1503 struct drm_connector *connector;
1504 struct drm_connector_list_iter iter;
1505 struct drm_dp_mst_topology_mgr *mgr;
1507 bool need_hotplug = false;
1509 drm_connector_list_iter_begin(dev, &iter);
1510 drm_for_each_connector_iter(connector, &iter) {
1511 aconnector = to_amdgpu_dm_connector(connector);
1512 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1513 aconnector->mst_port)
1516 mgr = &aconnector->mst_mgr;
1519 drm_dp_mst_topology_mgr_suspend(mgr);
1521 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1523 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1524 need_hotplug = true;
1528 drm_connector_list_iter_end(&iter);
1531 drm_kms_helper_hotplug_event(dev);
1534 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1536 struct smu_context *smu = &adev->smu;
1539 if (!is_support_sw_smu(adev))
1542 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1543 * on the Windows driver dc implementation.
1544 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1545 * should be passed to smu during boot up and resume from s3.
1546 * boot up: dc calculate dcn watermark clock settings within dc_create,
1547 * dcn20_resource_construct
1548 * then call pplib functions below to pass the settings to smu:
1549 * smu_set_watermarks_for_clock_ranges
1550 * smu_set_watermarks_table
1551 * navi10_set_watermarks_table
1552 * smu_write_watermarks_table
1554 * For Renoir, clock settings of dcn watermark are also fixed values.
1555 * dc has implemented a different flow for the Windows driver:
1556 * dc_hardware_init / dc_set_power_state
1561 * smu_set_watermarks_for_clock_ranges
1562 * renoir_set_watermarks_table
1563 * smu_write_watermarks_table
1566 * dc_hardware_init -> amdgpu_dm_init
1567 * dc_set_power_state --> dm_resume
1569 * therefore, this function applies to navi10/12/14 but not Renoir
1572 switch(adev->asic_type) {
1581 ret = smu_write_watermarks_table(smu);
1583 DRM_ERROR("Failed to update WMTABLE!\n");
1591 * dm_hw_init() - Initialize DC device
1592 * @handle: The base driver device containing the amdgpu_dm device.
1594 * Initialize the &struct amdgpu_display_manager device. This involves calling
1595 * the initializers of each DM component, then populating the struct with them.
1597 * Although the function implies hardware initialization, both hardware and
1598 * software are initialized here. Splitting them out to their relevant init
1599 * hooks is a future TODO item.
1601 * Some notable things that are initialized here:
1603 * - Display Core, both software and hardware
1604 * - DC modules that we need (freesync and color management)
1605 * - DRM software states
1606 * - Interrupt sources and handlers
1608 * - Debug FS entries, if enabled
1610 static int dm_hw_init(void *handle)
1612 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1613 /* Create DAL display manager */
1614 amdgpu_dm_init(adev);
1615 amdgpu_dm_hpd_init(adev);
1621 * dm_hw_fini() - Teardown DC device
1622 * @handle: The base driver device containing the amdgpu_dm device.
1624 * Teardown components within &struct amdgpu_display_manager that require
1625 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1626 * were loaded. Also flush IRQ workqueues and disable them.
1628 static int dm_hw_fini(void *handle)
1630 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1632 amdgpu_dm_hpd_fini(adev);
1634 amdgpu_dm_irq_fini(adev);
1635 amdgpu_dm_fini(adev);
1640 static int dm_enable_vblank(struct drm_crtc *crtc);
1641 static void dm_disable_vblank(struct drm_crtc *crtc);
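/* Enable or disable the pageflip and vblank interrupts for every stream in
 * @state; used to quiesce and restore display interrupts across GPU reset.
 */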
1643 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1644 struct dc_state *state, bool enable)
1646 enum dc_irq_source irq_source;
1647 struct amdgpu_crtc *acrtc;
1651 for (i = 0; i < state->stream_count; i++) {
1652 acrtc = get_crtc_by_otg_inst(
1653 adev, state->stream_status[i].primary_otg_inst);
1655 if (acrtc && state->stream_status[i].plane_count != 0) {
1656 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1657 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1658 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1659 acrtc->crtc_id, enable ? "en" : "dis", rc);
1661 DRM_WARN("Failed to %s pflip interrupts\n",
1662 enable ? "enable" : "disable");
1665 rc = dm_enable_vblank(&acrtc->base);
1667 DRM_WARN("Failed to enable vblank interrupts\n");
1669 dm_disable_vblank(&acrtc->base);
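/* Commit a copy of the current state with all streams (and their planes)
 * removed, leaving DC driving zero streams; used on suspend while a GPU
 * reset is in progress.
 */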
1677 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1679 struct dc_state *context = NULL;
1680 enum dc_status res = DC_ERROR_UNEXPECTED;
1682 struct dc_stream_state *del_streams[MAX_PIPES];
1683 int del_streams_count = 0;
1685 memset(del_streams, 0, sizeof(del_streams));
1687 context = dc_create_state(dc);
1688 if (context == NULL)
1689 goto context_alloc_fail;
1691 dc_resource_state_copy_construct_current(dc, context);
1693 /* First remove from context all streams */
1694 for (i = 0; i < context->stream_count; i++) {
1695 struct dc_stream_state *stream = context->streams[i];
1697 del_streams[del_streams_count++] = stream;
1700 /* Remove all planes for removed streams and then remove the streams */
1701 for (i = 0; i < del_streams_count; i++) {
1702 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1703 res = DC_FAIL_DETACH_SURFACES;
1707 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1713 res = dc_validate_global_state(dc, context, false);
1716 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1720 res = dc_commit_state(dc, context);
1723 dc_release_state(context);
1729 static int dm_suspend(void *handle)
1731 struct amdgpu_device *adev = handle;
1732 struct amdgpu_display_manager *dm = &adev->dm;
1735 if (amdgpu_in_reset(adev)) {
1736 mutex_lock(&dm->dc_lock);
1737 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1739 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1741 amdgpu_dm_commit_zero_streams(dm->dc);
1743 amdgpu_dm_irq_suspend(adev);
1748 WARN_ON(adev->dm.cached_state);
1749 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1751 s3_handle_mst(adev_to_drm(adev), true);
1753 amdgpu_dm_irq_suspend(adev);
1756 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1761 static struct amdgpu_dm_connector *
1762 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1763 struct drm_crtc *crtc)
1766 struct drm_connector_state *new_con_state;
1767 struct drm_connector *connector;
1768 struct drm_crtc *crtc_from_state;
1770 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1771 crtc_from_state = new_con_state->crtc;
1773 if (crtc_from_state == crtc)
1774 return to_amdgpu_dm_connector(connector);
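/* For connectors forced on without a detectable sink, fake the detection:
 * mark the link disconnected, create a local sink whose signal type matches
 * the connector, and read the EDID through the usual helpers.
 */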
1780 static void emulated_link_detect(struct dc_link *link)
1782 struct dc_sink_init_data sink_init_data = { 0 };
1783 struct display_sink_capability sink_caps = { 0 };
1784 enum dc_edid_status edid_status;
1785 struct dc_context *dc_ctx = link->ctx;
1786 struct dc_sink *sink = NULL;
1787 struct dc_sink *prev_sink = NULL;
1789 link->type = dc_connection_none;
1790 prev_sink = link->local_sink;
1792 if (prev_sink != NULL)
1793 dc_sink_retain(prev_sink);
1795 switch (link->connector_signal) {
1796 case SIGNAL_TYPE_HDMI_TYPE_A: {
1797 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1798 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1802 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1803 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1804 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1808 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1809 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1810 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1814 case SIGNAL_TYPE_LVDS: {
1815 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1816 sink_caps.signal = SIGNAL_TYPE_LVDS;
1820 case SIGNAL_TYPE_EDP: {
1821 sink_caps.transaction_type =
1822 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1823 sink_caps.signal = SIGNAL_TYPE_EDP;
1827 case SIGNAL_TYPE_DISPLAY_PORT: {
1828 sink_caps.transaction_type =
1829 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1830 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1835 DC_ERROR("Invalid connector type! signal:%d\n",
1836 link->connector_signal);
1840 sink_init_data.link = link;
1841 sink_init_data.sink_signal = sink_caps.signal;
1843 sink = dc_sink_create(&sink_init_data);
1845 DC_ERROR("Failed to create sink!\n");
1849 /* dc_sink_create returns a new reference */
1850 link->local_sink = sink;
1852 edid_status = dm_helpers_read_local_edid(
1857 if (edid_status != EDID_OK)
1858 DC_ERROR("Failed to read EDID");
1862 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1863 struct amdgpu_display_manager *dm)
1866 struct dc_surface_update surface_updates[MAX_SURFACES];
1867 struct dc_plane_info plane_infos[MAX_SURFACES];
1868 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1869 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1870 struct dc_stream_update stream_update;
1874 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1877 dm_error("Failed to allocate update bundle\n");
1881 for (k = 0; k < dc_state->stream_count; k++) {
1882 bundle->stream_update.stream = dc_state->streams[k];
1884 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1885 bundle->surface_updates[m].surface =
1886 dc_state->stream_status->plane_states[m];
1887 bundle->surface_updates[m].surface->force_full_update =
1890 dc_commit_updates_for_stream(
1891 dm->dc, bundle->surface_updates,
1892 dc_state->stream_status->plane_count,
1893 dc_state->streams[k], &bundle->stream_update, dc_state);
1902 static int dm_resume(void *handle)
1904 struct amdgpu_device *adev = handle;
1905 struct drm_device *ddev = adev_to_drm(adev);
1906 struct amdgpu_display_manager *dm = &adev->dm;
1907 struct amdgpu_dm_connector *aconnector;
1908 struct drm_connector *connector;
1909 struct drm_connector_list_iter iter;
1910 struct drm_crtc *crtc;
1911 struct drm_crtc_state *new_crtc_state;
1912 struct dm_crtc_state *dm_new_crtc_state;
1913 struct drm_plane *plane;
1914 struct drm_plane_state *new_plane_state;
1915 struct dm_plane_state *dm_new_plane_state;
1916 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1917 enum dc_connection_type new_connection_type = dc_connection_none;
1918 struct dc_state *dc_state;
1921 if (amdgpu_in_reset(adev)) {
1922 dc_state = dm->cached_dc_state;
1924 r = dm_dmub_hw_init(adev);
1926 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1928 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1931 amdgpu_dm_irq_resume_early(adev);
1933 for (i = 0; i < dc_state->stream_count; i++) {
1934 dc_state->streams[i]->mode_changed = true;
1935 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1936 dc_state->stream_status->plane_states[j]->update_flags.raw
1941 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1943 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1945 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1947 dc_release_state(dm->cached_dc_state);
1948 dm->cached_dc_state = NULL;
1950 amdgpu_dm_irq_resume_late(adev);
1952 mutex_unlock(&dm->dc_lock);
1956 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1957 dc_release_state(dm_state->context);
1958 dm_state->context = dc_create_state(dm->dc);
1959 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1960 dc_resource_state_construct(dm->dc, dm_state->context);
1962 /* Before powering on DC we need to re-initialize DMUB. */
1963 r = dm_dmub_hw_init(adev);
1965 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1967 /* power on hardware */
1968 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1970 /* program HPD filter */
1974 * early enable HPD Rx IRQ, should be done before setting the mode,
1975 * as short-pulse interrupts are used for MST
1977 amdgpu_dm_irq_resume_early(adev);
1979 /* On resume we need to rewrite the MSTM control bits to enable MST */
1980 s3_handle_mst(ddev, false);
1983 drm_connector_list_iter_begin(ddev, &iter);
1984 drm_for_each_connector_iter(connector, &iter) {
1985 aconnector = to_amdgpu_dm_connector(connector);
1988 * this is the case when traversing through already created
1989 * MST connectors, should be skipped
1991 if (aconnector->mst_port)
1994 mutex_lock(&aconnector->hpd_lock);
1995 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1996 DRM_ERROR("KMS: Failed to detect connector\n");
1998 if (aconnector->base.force && new_connection_type == dc_connection_none)
1999 emulated_link_detect(aconnector->dc_link);
2001 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2003 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2004 aconnector->fake_enable = false;
2006 if (aconnector->dc_sink)
2007 dc_sink_release(aconnector->dc_sink);
2008 aconnector->dc_sink = NULL;
2009 amdgpu_dm_update_connector_after_detect(aconnector);
2010 mutex_unlock(&aconnector->hpd_lock);
2012 drm_connector_list_iter_end(&iter);
2014 /* Force mode set in atomic commit */
2015 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2016 new_crtc_state->active_changed = true;
2019 * atomic_check is expected to create the dc states. We need to release
2020 * them here, since they were duplicated as part of the suspend
2023 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2024 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2025 if (dm_new_crtc_state->stream) {
2026 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2027 dc_stream_release(dm_new_crtc_state->stream);
2028 dm_new_crtc_state->stream = NULL;
2032 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2033 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2034 if (dm_new_plane_state->dc_state) {
2035 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2036 dc_plane_state_release(dm_new_plane_state->dc_state);
2037 dm_new_plane_state->dc_state = NULL;
2041 drm_atomic_helper_resume(ddev, dm->cached_state);
2043 dm->cached_state = NULL;
2045 amdgpu_dm_irq_resume_late(adev);
2047 amdgpu_dm_smu_write_watermarks_table(adev);
2055 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2056 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2057 * the base driver's device list to be initialized and torn down accordingly.
2059 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2062 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2064 .early_init = dm_early_init,
2065 .late_init = dm_late_init,
2066 .sw_init = dm_sw_init,
2067 .sw_fini = dm_sw_fini,
2068 .hw_init = dm_hw_init,
2069 .hw_fini = dm_hw_fini,
2070 .suspend = dm_suspend,
2071 .resume = dm_resume,
2072 .is_idle = dm_is_idle,
2073 .wait_for_idle = dm_wait_for_idle,
2074 .check_soft_reset = dm_check_soft_reset,
2075 .soft_reset = dm_soft_reset,
2076 .set_clockgating_state = dm_set_clockgating_state,
2077 .set_powergating_state = dm_set_powergating_state,
2080 const struct amdgpu_ip_block_version dm_ip_block =
2082 .type = AMD_IP_BLOCK_TYPE_DCE,
2086 .funcs = &amdgpu_dm_funcs,
2096 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2097 .fb_create = amdgpu_display_user_framebuffer_create,
2098 .output_poll_changed = drm_fb_helper_output_poll_changed,
2099 .atomic_check = amdgpu_dm_atomic_check,
2100 .atomic_commit = amdgpu_dm_atomic_commit,
2103 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2104 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2107 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2109 u32 max_cll, min_cll, max, min, q, r;
2110 struct amdgpu_dm_backlight_caps *caps;
2111 struct amdgpu_display_manager *dm;
2112 struct drm_connector *conn_base;
2113 struct amdgpu_device *adev;
2114 struct dc_link *link = NULL;
2115 static const u8 pre_computed_values[] = {
2116 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2117 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2119 if (!aconnector || !aconnector->dc_link)
2122 link = aconnector->dc_link;
2123 if (link->connector_signal != SIGNAL_TYPE_EDP)
2126 conn_base = &aconnector->base;
2127 adev = drm_to_adev(conn_base->dev);
2129 caps = &dm->backlight_caps;
2130 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2131 caps->aux_support = false;
2132 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2133 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2135 if (caps->ext_caps->bits.oled == 1 ||
2136 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2137 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2138 caps->aux_support = true;
2140 /* From the specification (CTA-861-G), for calculating the maximum
2141 * luminance we need to use:
2142 * Luminance = 50*2**(CV/32)
2143 * where CV is a one-byte value.
2144 * Evaluating this expression directly would require floating-point
2145 * precision; to avoid that complexity, we take advantage of the fact
2146 * that CV is divided by a constant. From Euclid's division algorithm,
2147 * we know that CV can be written as: CV = 32*q + r. Replacing CV in
2148 * the Luminance expression gives 50*(2**q)*(2**(r/32)), so we just
2149 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
2150 * those values we used the following Ruby line:
2151 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2152 * The results of the above expressions can be verified at
2153 * pre_computed_values.
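/*
 * Worked example (illustrative only; it assumes q = max_cll / 32 and
 * r = max_cll % 32, per the derivation above): for max_cll = 77,
 * q = 2 and r = 13, so
 * max = (1 << 2) * pre_computed_values[13] = 4 * 66 = 264,
 * which closely matches the exact value 50*2**(77/32) ~= 265 nits.
 */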
2157 max = (1 << q) * pre_computed_values[r];
2159 // min luminance: maxLum * (CV/255)^2 / 100
2160 q = DIV_ROUND_CLOSEST(min_cll, 255);
2161 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2163 caps->aux_max_input_signal = max;
2164 caps->aux_min_input_signal = min;
2167 void amdgpu_dm_update_connector_after_detect(
2168 struct amdgpu_dm_connector *aconnector)
2170 struct drm_connector *connector = &aconnector->base;
2171 struct drm_device *dev = connector->dev;
2172 struct dc_sink *sink;
2174 /* MST handled by drm_mst framework */
2175 if (aconnector->mst_mgr.mst_state)
2178 sink = aconnector->dc_link->local_sink;
2180 dc_sink_retain(sink);
2183 * EDID mgmt connector gets its first update only in the mode_valid hook,
2184 * and then the connector sink is set to either a fake or a physical sink
2185 * depending on the link status. Skip if already done during boot.
2187 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2188 && aconnector->dc_em_sink) {
2191 * For S3 resume with a headless configuration, use the em_sink to fake
2192 * a stream because connector->sink is set to NULL on resume.
2194 mutex_lock(&dev->mode_config.mutex);
2197 if (aconnector->dc_sink) {
2198 amdgpu_dm_update_freesync_caps(connector, NULL);
2200 * The retain and release below are used to
2201 * bump up the refcount for the sink because the link no longer
2202 * points to it after disconnect; otherwise, on the next CRTC-to-connector
2203 * reshuffle by the UMD we would run into an unwanted dc_sink release.
2205 dc_sink_release(aconnector->dc_sink);
2207 aconnector->dc_sink = sink;
2208 dc_sink_retain(aconnector->dc_sink);
2209 amdgpu_dm_update_freesync_caps(connector,
2212 amdgpu_dm_update_freesync_caps(connector, NULL);
2213 if (!aconnector->dc_sink) {
2214 aconnector->dc_sink = aconnector->dc_em_sink;
2215 dc_sink_retain(aconnector->dc_sink);
2219 mutex_unlock(&dev->mode_config.mutex);
2222 dc_sink_release(sink);
2227 * TODO: temporary guard while looking for a proper fix.
2228 * If this sink is an MST sink, we should not do anything.
2230 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2231 dc_sink_release(sink);
2235 if (aconnector->dc_sink == sink) {
2237 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2240 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2241 aconnector->connector_id);
2243 dc_sink_release(sink);
2247 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2248 aconnector->connector_id, aconnector->dc_sink, sink);
2250 mutex_lock(&dev->mode_config.mutex);
2253 * 1. Update status of the drm connector
2254 * 2. Send an event and let userspace tell us what to do
2258 * TODO: check if we still need the S3 mode update workaround.
2259 * If yes, put it here.
2261 if (aconnector->dc_sink)
2262 amdgpu_dm_update_freesync_caps(connector, NULL);
2264 aconnector->dc_sink = sink;
2265 dc_sink_retain(aconnector->dc_sink);
2266 if (sink->dc_edid.length == 0) {
2267 aconnector->edid = NULL;
2268 if (aconnector->dc_link->aux_mode) {
2269 drm_dp_cec_unset_edid(
2270 &aconnector->dm_dp_aux.aux);
2274 (struct edid *)sink->dc_edid.raw_edid;
2276 drm_connector_update_edid_property(connector,
2278 drm_add_edid_modes(connector, aconnector->edid);
2280 if (aconnector->dc_link->aux_mode)
2281 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2285 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2286 update_connector_ext_caps(aconnector);
2288 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2289 amdgpu_dm_update_freesync_caps(connector, NULL);
2290 drm_connector_update_edid_property(connector, NULL);
2291 aconnector->num_modes = 0;
2292 dc_sink_release(aconnector->dc_sink);
2293 aconnector->dc_sink = NULL;
2294 aconnector->edid = NULL;
2295 #ifdef CONFIG_DRM_AMD_DC_HDCP
2296 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2297 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2298 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2302 mutex_unlock(&dev->mode_config.mutex);
2304 update_subconnector_property(aconnector);
2307 dc_sink_release(sink);
2310 static void handle_hpd_irq(void *param)
2312 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2313 struct drm_connector *connector = &aconnector->base;
2314 struct drm_device *dev = connector->dev;
2315 enum dc_connection_type new_connection_type = dc_connection_none;
2316 #ifdef CONFIG_DRM_AMD_DC_HDCP
2317 struct amdgpu_device *adev = drm_to_adev(dev);
2321 * In case of failure or MST there is no need to update the connector status
2322 * or notify the OS, since (in the MST case) MST does this in its own context.
2324 mutex_lock(&aconnector->hpd_lock);
2326 #ifdef CONFIG_DRM_AMD_DC_HDCP
2327 if (adev->dm.hdcp_workqueue)
2328 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2330 if (aconnector->fake_enable)
2331 aconnector->fake_enable = false;
2333 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2334 DRM_ERROR("KMS: Failed to detect connector\n");
2336 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2337 emulated_link_detect(aconnector->dc_link);
2340 drm_modeset_lock_all(dev);
2341 dm_restore_drm_connector_state(dev, connector);
2342 drm_modeset_unlock_all(dev);
2344 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2345 drm_kms_helper_hotplug_event(dev);
2347 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2348 amdgpu_dm_update_connector_after_detect(aconnector);
2351 drm_modeset_lock_all(dev);
2352 dm_restore_drm_connector_state(dev, connector);
2353 drm_modeset_unlock_all(dev);
2355 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2356 drm_kms_helper_hotplug_event(dev);
2358 mutex_unlock(&aconnector->hpd_lock);
2362 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2364 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2366 bool new_irq_handled = false;
2368 int dpcd_bytes_to_read;
2370 const int max_process_count = 30;
2371 int process_count = 0;
2373 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2375 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2376 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2377 /* DPCD 0x200 - 0x201 for downstream IRQ */
2378 dpcd_addr = DP_SINK_COUNT;
2380 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2381 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2382 dpcd_addr = DP_SINK_COUNT_ESI;
2385 dret = drm_dp_dpcd_read(
2386 &aconnector->dm_dp_aux.aux,
2389 dpcd_bytes_to_read);
2391 while (dret == dpcd_bytes_to_read &&
2392 process_count < max_process_count) {
2398 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2399 /* handle HPD short pulse irq */
2400 if (aconnector->mst_mgr.mst_state)
2402 &aconnector->mst_mgr,
2406 if (new_irq_handled) {
2407 /* ACK at DPCD to notify downstream */
2408 const int ack_dpcd_bytes_to_write =
2409 dpcd_bytes_to_read - 1;
2411 for (retry = 0; retry < 3; retry++) {
2414 wret = drm_dp_dpcd_write(
2415 &aconnector->dm_dp_aux.aux,
2418 ack_dpcd_bytes_to_write);
2419 if (wret == ack_dpcd_bytes_to_write)
2423 /* check if there is new irq to be handled */
2424 dret = drm_dp_dpcd_read(
2425 &aconnector->dm_dp_aux.aux,
2428 dpcd_bytes_to_read);
2430 new_irq_handled = false;
2436 if (process_count == max_process_count)
2437 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2440 static void handle_hpd_rx_irq(void *param)
2442 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2443 struct drm_connector *connector = &aconnector->base;
2444 struct drm_device *dev = connector->dev;
2445 struct dc_link *dc_link = aconnector->dc_link;
2446 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2447 enum dc_connection_type new_connection_type = dc_connection_none;
2448 #ifdef CONFIG_DRM_AMD_DC_HDCP
2449 union hpd_irq_data hpd_irq_data;
2450 struct amdgpu_device *adev = drm_to_adev(dev);
2452 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2456 * TODO: Temporarily add a mutex so the hpd interrupt does not have a
2457 * gpio conflict; once the i2c helper is implemented, this mutex should be
2460 if (dc_link->type != dc_connection_mst_branch)
2461 mutex_lock(&aconnector->hpd_lock);
2464 #ifdef CONFIG_DRM_AMD_DC_HDCP
2465 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2467 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2469 !is_mst_root_connector) {
2470 /* Downstream Port status changed. */
2471 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2472 DRM_ERROR("KMS: Failed to detect connector\n");
2474 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2475 emulated_link_detect(dc_link);
2477 if (aconnector->fake_enable)
2478 aconnector->fake_enable = false;
2480 amdgpu_dm_update_connector_after_detect(aconnector);
2483 drm_modeset_lock_all(dev);
2484 dm_restore_drm_connector_state(dev, connector);
2485 drm_modeset_unlock_all(dev);
2487 drm_kms_helper_hotplug_event(dev);
2488 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2490 if (aconnector->fake_enable)
2491 aconnector->fake_enable = false;
2493 amdgpu_dm_update_connector_after_detect(aconnector);
2496 drm_modeset_lock_all(dev);
2497 dm_restore_drm_connector_state(dev, connector);
2498 drm_modeset_unlock_all(dev);
2500 drm_kms_helper_hotplug_event(dev);
2503 #ifdef CONFIG_DRM_AMD_DC_HDCP
2504 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2505 if (adev->dm.hdcp_workqueue)
2506 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2509 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2510 (dc_link->type == dc_connection_mst_branch))
2511 dm_handle_hpd_rx_irq(aconnector);
2513 if (dc_link->type != dc_connection_mst_branch) {
2514 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2515 mutex_unlock(&aconnector->hpd_lock);
2519 static void register_hpd_handlers(struct amdgpu_device *adev)
2521 struct drm_device *dev = adev_to_drm(adev);
2522 struct drm_connector *connector;
2523 struct amdgpu_dm_connector *aconnector;
2524 const struct dc_link *dc_link;
2525 struct dc_interrupt_params int_params = {0};
2527 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2528 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2530 list_for_each_entry(connector,
2531 &dev->mode_config.connector_list, head) {
2533 aconnector = to_amdgpu_dm_connector(connector);
2534 dc_link = aconnector->dc_link;
2536 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2537 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2538 int_params.irq_source = dc_link->irq_source_hpd;
2540 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2542 (void *) aconnector);
2545 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2547 /* Also register for DP short pulse (hpd_rx). */
2548 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2549 int_params.irq_source = dc_link->irq_source_hpd_rx;
2551 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2553 (void *) aconnector);
2558 #if defined(CONFIG_DRM_AMD_DC_SI)
2559 /* Register IRQ sources and initialize IRQ callbacks */
2560 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2562 struct dc *dc = adev->dm.dc;
2563 struct common_irq_params *c_irq_params;
2564 struct dc_interrupt_params int_params = {0};
2567 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2569 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2570 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2573 * Actions of amdgpu_irq_add_id():
2574 * 1. Register a set() function with base driver.
2575 * Base driver will call set() function to enable/disable an
2576 * interrupt in DC hardware.
2577 * 2. Register amdgpu_dm_irq_handler().
2578 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2579 * coming from DC hardware.
2580 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2581 * for acknowledging and handling. */
2583 /* Use VBLANK interrupt */
2584 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2585 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2587 DRM_ERROR("Failed to add crtc irq id!\n");
2591 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2592 int_params.irq_source =
2593 dc_interrupt_to_irq_source(dc, i + 1, 0);
2595 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2597 c_irq_params->adev = adev;
2598 c_irq_params->irq_src = int_params.irq_source;
2600 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2601 dm_crtc_high_irq, c_irq_params);
2604 /* Use GRPH_PFLIP interrupt */
2605 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2606 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2607 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2609 DRM_ERROR("Failed to add page flip irq id!\n");
2613 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2614 int_params.irq_source =
2615 dc_interrupt_to_irq_source(dc, i, 0);
2617 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2619 c_irq_params->adev = adev;
2620 c_irq_params->irq_src = int_params.irq_source;
2622 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2623 dm_pflip_high_irq, c_irq_params);
2628 r = amdgpu_irq_add_id(adev, client_id,
2629 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2631 DRM_ERROR("Failed to add hpd irq id!\n");
2635 register_hpd_handlers(adev);
2641 /* Register IRQ sources and initialize IRQ callbacks */
2642 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2644 struct dc *dc = adev->dm.dc;
2645 struct common_irq_params *c_irq_params;
2646 struct dc_interrupt_params int_params = {0};
2649 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2651 if (adev->asic_type >= CHIP_VEGA10)
2652 client_id = SOC15_IH_CLIENTID_DCE;
2654 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2655 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2658 * Actions of amdgpu_irq_add_id():
2659 * 1. Register a set() function with base driver.
2660 * Base driver will call set() function to enable/disable an
2661 * interrupt in DC hardware.
2662 * 2. Register amdgpu_dm_irq_handler().
2663 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2664 * coming from DC hardware.
2665 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2666 * for acknowledging and handling. */
2668 /* Use VBLANK interrupt */
2669 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2670 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2672 DRM_ERROR("Failed to add crtc irq id!\n");
2676 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2677 int_params.irq_source =
2678 dc_interrupt_to_irq_source(dc, i, 0);
2680 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2682 c_irq_params->adev = adev;
2683 c_irq_params->irq_src = int_params.irq_source;
2685 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2686 dm_crtc_high_irq, c_irq_params);
2689 /* Use VUPDATE interrupt */
2690 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2691 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2693 DRM_ERROR("Failed to add vupdate irq id!\n");
2697 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2698 int_params.irq_source =
2699 dc_interrupt_to_irq_source(dc, i, 0);
2701 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2703 c_irq_params->adev = adev;
2704 c_irq_params->irq_src = int_params.irq_source;
2706 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2707 dm_vupdate_high_irq, c_irq_params);
2710 /* Use GRPH_PFLIP interrupt */
2711 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2712 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2713 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2715 DRM_ERROR("Failed to add page flip irq id!\n");
2719 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2720 int_params.irq_source =
2721 dc_interrupt_to_irq_source(dc, i, 0);
2723 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2725 c_irq_params->adev = adev;
2726 c_irq_params->irq_src = int_params.irq_source;
2728 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2729 dm_pflip_high_irq, c_irq_params);
2734 r = amdgpu_irq_add_id(adev, client_id,
2735 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2737 DRM_ERROR("Failed to add hpd irq id!\n");
2741 register_hpd_handlers(adev);
2746 #if defined(CONFIG_DRM_AMD_DC_DCN)
2747 /* Register IRQ sources and initialize IRQ callbacks */
2748 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2750 struct dc *dc = adev->dm.dc;
2751 struct common_irq_params *c_irq_params;
2752 struct dc_interrupt_params int_params = {0};
2756 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2757 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2760 * Actions of amdgpu_irq_add_id():
2761 * 1. Register a set() function with base driver.
2762 * Base driver will call set() function to enable/disable an
2763 * interrupt in DC hardware.
2764 * 2. Register amdgpu_dm_irq_handler().
2765 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2766 * coming from DC hardware.
2767 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2768 * for acknowledging and handling.
2771 /* Use VSTARTUP interrupt */
2772 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2773 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2775 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2778 DRM_ERROR("Failed to add crtc irq id!\n");
2782 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2783 int_params.irq_source =
2784 dc_interrupt_to_irq_source(dc, i, 0);
2786 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2788 c_irq_params->adev = adev;
2789 c_irq_params->irq_src = int_params.irq_source;
2791 amdgpu_dm_irq_register_interrupt(
2792 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2795 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2796 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2797 * to trigger at end of each vblank, regardless of state of the lock,
2798 * matching DCE behaviour.
2800 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2801 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2803 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2806 DRM_ERROR("Failed to add vupdate irq id!\n");
2810 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2811 int_params.irq_source =
2812 dc_interrupt_to_irq_source(dc, i, 0);
2814 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2816 c_irq_params->adev = adev;
2817 c_irq_params->irq_src = int_params.irq_source;
2819 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2820 dm_vupdate_high_irq, c_irq_params);
2823 /* Use GRPH_PFLIP interrupt */
2824 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2825 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2827 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2829 DRM_ERROR("Failed to add page flip irq id!\n");
2833 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2834 int_params.irq_source =
2835 dc_interrupt_to_irq_source(dc, i, 0);
2837 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2839 c_irq_params->adev = adev;
2840 c_irq_params->irq_src = int_params.irq_source;
2842 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2843 dm_pflip_high_irq, c_irq_params);
2848 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2851 DRM_ERROR("Failed to add hpd irq id!\n");
2855 register_hpd_handlers(adev);
2862 * Acquires the lock for the atomic state object and returns
2863 * the new atomic state.
2865 * This should only be called during atomic check.
2867 static int dm_atomic_get_state(struct drm_atomic_state *state,
2868 struct dm_atomic_state **dm_state)
2870 struct drm_device *dev = state->dev;
2871 struct amdgpu_device *adev = drm_to_adev(dev);
2872 struct amdgpu_display_manager *dm = &adev->dm;
2873 struct drm_private_state *priv_state;
2878 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2879 if (IS_ERR(priv_state))
2880 return PTR_ERR(priv_state);
2882 *dm_state = to_dm_atomic_state(priv_state);
2887 static struct dm_atomic_state *
2888 dm_atomic_get_new_state(struct drm_atomic_state *state)
2890 struct drm_device *dev = state->dev;
2891 struct amdgpu_device *adev = drm_to_adev(dev);
2892 struct amdgpu_display_manager *dm = &adev->dm;
2893 struct drm_private_obj *obj;
2894 struct drm_private_state *new_obj_state;
2897 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2898 if (obj->funcs == dm->atomic_obj.funcs)
2899 return to_dm_atomic_state(new_obj_state);
2905 static struct drm_private_state *
2906 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2908 struct dm_atomic_state *old_state, *new_state;
2910 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2914 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2916 old_state = to_dm_atomic_state(obj->state);
2918 if (old_state && old_state->context)
2919 new_state->context = dc_copy_state(old_state->context);
2921 if (!new_state->context) {
2926 return &new_state->base;
2929 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2930 struct drm_private_state *state)
2932 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2934 if (dm_state && dm_state->context)
2935 dc_release_state(dm_state->context);
2940 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2941 .atomic_duplicate_state = dm_atomic_duplicate_state,
2942 .atomic_destroy_state = dm_atomic_destroy_state,
2945 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2947 struct dm_atomic_state *state;
2950 adev->mode_info.mode_config_initialized = true;
2952 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2953 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2955 adev_to_drm(adev)->mode_config.max_width = 16384;
2956 adev_to_drm(adev)->mode_config.max_height = 16384;
2958 adev_to_drm(adev)->mode_config.preferred_depth = 24;
2959 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2960 /* indicates support for immediate flip */
2961 adev_to_drm(adev)->mode_config.async_page_flip = true;
2963 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2965 state = kzalloc(sizeof(*state), GFP_KERNEL);
2969 state->context = dc_create_state(adev->dm.dc);
2970 if (!state->context) {
2975 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2977 drm_atomic_private_obj_init(adev_to_drm(adev),
2978 &adev->dm.atomic_obj,
2980 &dm_atomic_state_funcs);
2982 r = amdgpu_display_modeset_create_props(adev);
2984 dc_release_state(state->context);
2989 r = amdgpu_dm_audio_init(adev);
2991 dc_release_state(state->context);
2999 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3000 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3001 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3003 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3004 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3006 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3008 #if defined(CONFIG_ACPI)
3009 struct amdgpu_dm_backlight_caps caps;
3011 memset(&caps, 0, sizeof(caps));
3013 if (dm->backlight_caps.caps_valid)
3016 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3017 if (caps.caps_valid) {
3018 dm->backlight_caps.caps_valid = true;
3019 if (caps.aux_support)
3021 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3022 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3024 dm->backlight_caps.min_input_signal =
3025 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3026 dm->backlight_caps.max_input_signal =
3027 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3030 if (dm->backlight_caps.aux_support)
3033 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3034 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3038 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3045 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3046 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3051 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3052 unsigned *min, unsigned *max)
3057 if (caps->aux_support) {
3058 // Firmware limits are in nits, DC API wants millinits.
3059 *max = 1000 * caps->aux_max_input_signal;
3060 *min = 1000 * caps->aux_min_input_signal;
3062 // Firmware limits are 8-bit, PWM control is 16-bit.
3063 *max = 0x101 * caps->max_input_signal;
3064 *min = 0x101 * caps->min_input_signal;
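/*
 * Arithmetic note (illustrative): multiplying by 0x101 replicates an
 * 8-bit value into both bytes of a 16-bit one, so with the 12/255
 * defaults above this yields min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 0xffff, i.e. the full 16-bit PWM range.
 */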
3069 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3070 uint32_t brightness)
3074 if (!get_brightness_range(caps, &min, &max))
3077 // Rescale 0..255 to min..max
3078 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3079 AMDGPU_MAX_BL_LEVEL);
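/*
 * Worked example (illustrative, using the non-AUX limits sketched
 * above): with min = 3084 and max = 0xffff, user brightness 255 maps
 * to 0xffff and user brightness 0 maps to 3084, i.e. the firmware
 * minimum rather than a fully dark panel.
 */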
3082 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3083 uint32_t brightness)
3087 if (!get_brightness_range(caps, &min, &max))
3090 if (brightness < min)
3092 // Rescale min..max to 0..255
3093 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3097 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3099 struct amdgpu_display_manager *dm = bl_get_data(bd);
3100 struct amdgpu_dm_backlight_caps caps;
3101 struct dc_link *link = NULL;
3105 amdgpu_dm_update_backlight_caps(dm);
3106 caps = dm->backlight_caps;
3108 link = (struct dc_link *)dm->backlight_link;
3110 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3111 // Change brightness based on AUX property
3112 if (caps.aux_support)
3113 return set_backlight_via_aux(link, brightness);
3115 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3120 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3122 struct amdgpu_display_manager *dm = bl_get_data(bd);
3123 int ret = dc_link_get_backlight_level(dm->backlight_link);
3125 if (ret == DC_ERROR_UNEXPECTED)
3126 return bd->props.brightness;
3127 return convert_brightness_to_user(&dm->backlight_caps, ret);
3130 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3131 .options = BL_CORE_SUSPENDRESUME,
3132 .get_brightness = amdgpu_dm_backlight_get_brightness,
3133 .update_status = amdgpu_dm_backlight_update_status,
3137 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3140 struct backlight_properties props = { 0 };
3142 amdgpu_dm_update_backlight_caps(dm);
3144 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3145 props.brightness = AMDGPU_MAX_BL_LEVEL;
3146 props.type = BACKLIGHT_RAW;
3148 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3149 adev_to_drm(dm->adev)->primary->index);
3151 dm->backlight_dev = backlight_device_register(bl_name,
3152 adev_to_drm(dm->adev)->dev,
3154 &amdgpu_dm_backlight_ops,
3157 if (IS_ERR(dm->backlight_dev))
3158 DRM_ERROR("DM: Backlight registration failed!\n");
3160 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3165 static int initialize_plane(struct amdgpu_display_manager *dm,
3166 struct amdgpu_mode_info *mode_info, int plane_id,
3167 enum drm_plane_type plane_type,
3168 const struct dc_plane_cap *plane_cap)
3170 struct drm_plane *plane;
3171 unsigned long possible_crtcs;
3174 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3176 DRM_ERROR("KMS: Failed to allocate plane\n");
3179 plane->type = plane_type;
3182 * HACK: IGT tests expect that the primary plane for a CRTC
3183 * can only have one possible CRTC. Only expose support for
3184 * any CRTC if the plane is not going to be used as a primary
3185 * plane for a CRTC - like overlay or underlay planes.
3187 possible_crtcs = 1 << plane_id;
3188 if (plane_id >= dm->dc->caps.max_streams)
3189 possible_crtcs = 0xff;
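/*
 * Illustration: primary plane 0 gets possible_crtcs = 0x1 (CRTC 0
 * only), plane 1 gets 0x2 (CRTC 1 only), and so on, while overlay
 * planes (plane_id >= max_streams) get 0xff, i.e. any of up to 8
 * CRTCs.
 */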
3191 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3194 DRM_ERROR("KMS: Failed to initialize plane\n");
3200 mode_info->planes[plane_id] = plane;
3206 static void register_backlight_device(struct amdgpu_display_manager *dm,
3207 struct dc_link *link)
3209 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3210 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3212 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3213 link->type != dc_connection_none) {
3215 * Even if registration fails, we should continue with
3216 * DM initialization because not having backlight control
3217 * is better than a black screen.
3219 amdgpu_dm_register_backlight_device(dm);
3221 if (dm->backlight_dev)
3222 dm->backlight_link = link;
3229 * In this architecture, the association
3230 * connector -> encoder -> crtc
3231 * is not really required. The crtc and connector will hold the
3232 * display_index as an abstraction to use with the DAL component.
3234 * Returns 0 on success.
3236 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3238 struct amdgpu_display_manager *dm = &adev->dm;
3240 struct amdgpu_dm_connector *aconnector = NULL;
3241 struct amdgpu_encoder *aencoder = NULL;
3242 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3244 int32_t primary_planes;
3245 enum dc_connection_type new_connection_type = dc_connection_none;
3246 const struct dc_plane_cap *plane;
3248 link_cnt = dm->dc->caps.max_links;
3249 if (amdgpu_dm_mode_config_init(dm->adev)) {
3250 DRM_ERROR("DM: Failed to initialize mode config\n");
3254 /* There is one primary plane per CRTC */
3255 primary_planes = dm->dc->caps.max_streams;
3256 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3259 * Initialize primary planes, implicit planes for legacy IOCTLS.
3260 * Order is reversed to match iteration order in atomic check.
3262 for (i = (primary_planes - 1); i >= 0; i--) {
3263 plane = &dm->dc->caps.planes[i];
3265 if (initialize_plane(dm, mode_info, i,
3266 DRM_PLANE_TYPE_PRIMARY, plane)) {
3267 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3273 * Initialize overlay planes, index starting after primary planes.
3274 * These planes have a higher DRM index than the primary planes since
3275 * they should be considered as having a higher z-order.
3276 * Order is reversed to match iteration order in atomic check.
3278 * Only support DCN for now, and only expose one so we don't encourage
3279 * userspace to use up all the pipes.
3281 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3282 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3284 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3287 if (!plane->blends_with_above || !plane->blends_with_below)
3290 if (!plane->pixel_format_support.argb8888)
3293 if (initialize_plane(dm, NULL, primary_planes + i,
3294 DRM_PLANE_TYPE_OVERLAY, plane)) {
3295 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3299 /* Only create one overlay plane. */
3303 for (i = 0; i < dm->dc->caps.max_streams; i++)
3304 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3305 DRM_ERROR("KMS: Failed to initialize crtc\n");
3309 dm->display_indexes_num = dm->dc->caps.max_streams;
3311 /* loops over all connectors on the board */
3312 for (i = 0; i < link_cnt; i++) {
3313 struct dc_link *link = NULL;
3315 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3317 "KMS: Cannot support more than %d display indexes\n",
3318 AMDGPU_DM_MAX_DISPLAY_INDEX);
3322 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3326 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3330 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3331 DRM_ERROR("KMS: Failed to initialize encoder\n");
3335 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3336 DRM_ERROR("KMS: Failed to initialize connector\n");
3340 link = dc_get_link_at_index(dm->dc, i);
3342 if (!dc_link_detect_sink(link, &new_connection_type))
3343 DRM_ERROR("KMS: Failed to detect connector\n");
3345 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3346 emulated_link_detect(link);
3347 amdgpu_dm_update_connector_after_detect(aconnector);
3349 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3350 amdgpu_dm_update_connector_after_detect(aconnector);
3351 register_backlight_device(dm, link);
3352 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3353 amdgpu_dm_set_psr_caps(link);
3359 /* Software is initialized. Now we can register interrupt handlers. */
3360 switch (adev->asic_type) {
3361 #if defined(CONFIG_DRM_AMD_DC_SI)
3366 if (dce60_register_irq_handlers(dm->adev)) {
3367 DRM_ERROR("DM: Failed to initialize IRQ\n");
3381 case CHIP_POLARIS11:
3382 case CHIP_POLARIS10:
3383 case CHIP_POLARIS12:
3388 if (dce110_register_irq_handlers(dm->adev)) {
3389 DRM_ERROR("DM: Failed to initialize IRQ\n");
3393 #if defined(CONFIG_DRM_AMD_DC_DCN)
3399 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3400 case CHIP_SIENNA_CICHLID:
3401 case CHIP_NAVY_FLOUNDER:
3403 if (dcn10_register_irq_handlers(dm->adev)) {
3404 DRM_ERROR("DM: Failed to initialize IRQ\n");
3410 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3422 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3424 drm_mode_config_cleanup(dm->ddev);
3425 drm_atomic_private_obj_fini(&dm->atomic_obj);
3429 /******************************************************************************
3430 * amdgpu_display_funcs functions
3431 *****************************************************************************/
3434 * dm_bandwidth_update - program display watermarks
3436 * @adev: amdgpu_device pointer
3438 * Calculate and program the display watermarks and line buffer allocation.
3440 static void dm_bandwidth_update(struct amdgpu_device *adev)
3442 /* TODO: implement later */
3445 static const struct amdgpu_display_funcs dm_display_funcs = {
3446 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3447 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3448 .backlight_set_level = NULL, /* never called for DC */
3449 .backlight_get_level = NULL, /* never called for DC */
3450 .hpd_sense = NULL,/* called unconditionally */
3451 .hpd_set_polarity = NULL, /* called unconditionally */
3452 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3453 .page_flip_get_scanoutpos =
3454 dm_crtc_get_scanoutpos,/* called unconditionally */
3455 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3456 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3459 #if defined(CONFIG_DEBUG_KERNEL_DC)
3461 static ssize_t s3_debug_store(struct device *device,
3462 struct device_attribute *attr,
3468 struct drm_device *drm_dev = dev_get_drvdata(device);
3469 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3471 ret = kstrtoint(buf, 0, &s3_state);
3476 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3481 return ret == 0 ? count : 0;
3484 DEVICE_ATTR_WO(s3_debug);
3488 static int dm_early_init(void *handle)
3490 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3492 switch (adev->asic_type) {
3493 #if defined(CONFIG_DRM_AMD_DC_SI)
3497 adev->mode_info.num_crtc = 6;
3498 adev->mode_info.num_hpd = 6;
3499 adev->mode_info.num_dig = 6;
3502 adev->mode_info.num_crtc = 2;
3503 adev->mode_info.num_hpd = 2;
3504 adev->mode_info.num_dig = 2;
3509 adev->mode_info.num_crtc = 6;
3510 adev->mode_info.num_hpd = 6;
3511 adev->mode_info.num_dig = 6;
3514 adev->mode_info.num_crtc = 4;
3515 adev->mode_info.num_hpd = 6;
3516 adev->mode_info.num_dig = 7;
3520 adev->mode_info.num_crtc = 2;
3521 adev->mode_info.num_hpd = 6;
3522 adev->mode_info.num_dig = 6;
3526 adev->mode_info.num_crtc = 6;
3527 adev->mode_info.num_hpd = 6;
3528 adev->mode_info.num_dig = 7;
3531 adev->mode_info.num_crtc = 3;
3532 adev->mode_info.num_hpd = 6;
3533 adev->mode_info.num_dig = 9;
3536 adev->mode_info.num_crtc = 2;
3537 adev->mode_info.num_hpd = 6;
3538 adev->mode_info.num_dig = 9;
3540 case CHIP_POLARIS11:
3541 case CHIP_POLARIS12:
3542 adev->mode_info.num_crtc = 5;
3543 adev->mode_info.num_hpd = 5;
3544 adev->mode_info.num_dig = 5;
3546 case CHIP_POLARIS10:
3548 adev->mode_info.num_crtc = 6;
3549 adev->mode_info.num_hpd = 6;
3550 adev->mode_info.num_dig = 6;
3555 adev->mode_info.num_crtc = 6;
3556 adev->mode_info.num_hpd = 6;
3557 adev->mode_info.num_dig = 6;
3559 #if defined(CONFIG_DRM_AMD_DC_DCN)
3561 adev->mode_info.num_crtc = 4;
3562 adev->mode_info.num_hpd = 4;
3563 adev->mode_info.num_dig = 4;
3568 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3569 case CHIP_SIENNA_CICHLID:
3570 case CHIP_NAVY_FLOUNDER:
3572 adev->mode_info.num_crtc = 6;
3573 adev->mode_info.num_hpd = 6;
3574 adev->mode_info.num_dig = 6;
3577 adev->mode_info.num_crtc = 5;
3578 adev->mode_info.num_hpd = 5;
3579 adev->mode_info.num_dig = 5;
3582 adev->mode_info.num_crtc = 4;
3583 adev->mode_info.num_hpd = 4;
3584 adev->mode_info.num_dig = 4;
3587 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3591 amdgpu_dm_set_irq_funcs(adev);
3593 if (adev->mode_info.funcs == NULL)
3594 adev->mode_info.funcs = &dm_display_funcs;
3597 * Note: Do NOT change adev->audio_endpt_rreg and
3598 * adev->audio_endpt_wreg because they are initialised in
3599 * amdgpu_device_init()
3601 #if defined(CONFIG_DEBUG_KERNEL_DC)
3603 adev_to_drm(adev)->dev,
3604 &dev_attr_s3_debug);
3610 static bool modeset_required(struct drm_crtc_state *crtc_state,
3611 struct dc_stream_state *new_stream,
3612 struct dc_stream_state *old_stream)
3614 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3617 static bool modereset_required(struct drm_crtc_state *crtc_state)
3619 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3622 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3624 drm_encoder_cleanup(encoder);
3628 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3629 .destroy = amdgpu_dm_encoder_destroy,
3633 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3634 struct dc_scaling_info *scaling_info)
3636 int scale_w, scale_h;
3638 memset(scaling_info, 0, sizeof(*scaling_info));
3640 /* Source is fixed 16.16 but we ignore the fractional part for now... */
3641 scaling_info->src_rect.x = state->src_x >> 16;
3642 scaling_info->src_rect.y = state->src_y >> 16;
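/*
 * Illustrative note: in 16.16 fixed point, src_x = 0x00018000
 * represents 1.5; the >> 16 shifts above keep only the integer part,
 * so the half-pixel fraction is dropped.
 */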
3644 scaling_info->src_rect.width = state->src_w >> 16;
3645 if (scaling_info->src_rect.width == 0)
3648 scaling_info->src_rect.height = state->src_h >> 16;
3649 if (scaling_info->src_rect.height == 0)
3652 scaling_info->dst_rect.x = state->crtc_x;
3653 scaling_info->dst_rect.y = state->crtc_y;
3655 if (state->crtc_w == 0)
3658 scaling_info->dst_rect.width = state->crtc_w;
3660 if (state->crtc_h == 0)
3663 scaling_info->dst_rect.height = state->crtc_h;
3665 /* DRM doesn't specify clipping on destination output. */
3666 scaling_info->clip_rect = scaling_info->dst_rect;
3668 /* TODO: Validate scaling per-format with DC plane caps */
3669 scale_w = scaling_info->dst_rect.width * 1000 /
3670 scaling_info->src_rect.width;
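/*
 * scale_w (and scale_h below) are dst/src ratios scaled by 1000, so
 * the 250..16000 bounds checked here allow roughly 0.25x to 16x
 * scaling in each direction.
 */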
3672 if (scale_w < 250 || scale_w > 16000)
3675 scale_h = scaling_info->dst_rect.height * 1000 /
3676 scaling_info->src_rect.height;
3678 if (scale_h < 250 || scale_h > 16000)
3682 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3683 * assume reasonable defaults based on the format.
3689 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3690 uint64_t *tiling_flags, bool *tmz_surface)
3692 struct amdgpu_bo *rbo;
3697 *tmz_surface = false;
3701 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3702 r = amdgpu_bo_reserve(rbo, false);
3705 /* Don't show error message when returning -ERESTARTSYS */
3706 if (r != -ERESTARTSYS)
3707 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3712 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3715 *tmz_surface = amdgpu_bo_encrypted(rbo);
3717 amdgpu_bo_unreserve(rbo);
3722 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3724 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3726 return offset ? (address + offset * 256) : 0;
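/*
 * Illustrative example: DCC_OFFSET_256B stores the metadata offset in
 * units of 256 bytes, so an offset field of 4 places the DCC metadata
 * at address + 1024, while an offset of 0 means no DCC surface.
 */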
3730 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3731 const struct amdgpu_framebuffer *afb,
3732 const enum surface_pixel_format format,
3733 const enum dc_rotation_angle rotation,
3734 const struct plane_size *plane_size,
3735 const union dc_tiling_info *tiling_info,
3736 const uint64_t info,
3737 struct dc_plane_dcc_param *dcc,
3738 struct dc_plane_address *address,
3739 bool force_disable_dcc)
3741 struct dc *dc = adev->dm.dc;
3742 struct dc_dcc_surface_param input;
3743 struct dc_surface_dcc_cap output;
3744 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3745 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3746 uint64_t dcc_address;
3748 memset(&input, 0, sizeof(input));
3749 memset(&output, 0, sizeof(output));
3751 if (force_disable_dcc)
3757 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3760 if (!dc->cap_funcs.get_dcc_compression_cap)
3763 input.format = format;
3764 input.surface_size.width = plane_size->surface_size.width;
3765 input.surface_size.height = plane_size->surface_size.height;
3766 input.swizzle_mode = tiling_info->gfx9.swizzle;
3768 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3769 input.scan = SCAN_DIRECTION_HORIZONTAL;
3770 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3771 input.scan = SCAN_DIRECTION_VERTICAL;
3773 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3776 if (!output.capable)
3779 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3784 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3785 dcc->independent_64b_blks = i64b;
3787 dcc_address = get_dcc_address(afb->address, info);
3788 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3789 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3795 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3796 const struct amdgpu_framebuffer *afb,
3797 const enum surface_pixel_format format,
3798 const enum dc_rotation_angle rotation,
3799 const uint64_t tiling_flags,
3800 union dc_tiling_info *tiling_info,
3801 struct plane_size *plane_size,
3802 struct dc_plane_dcc_param *dcc,
3803 struct dc_plane_address *address,
3805 bool force_disable_dcc)
3807 const struct drm_framebuffer *fb = &afb->base;
3810 memset(tiling_info, 0, sizeof(*tiling_info));
3811 memset(plane_size, 0, sizeof(*plane_size));
3812 memset(dcc, 0, sizeof(*dcc));
3813 memset(address, 0, sizeof(*address));
3815 address->tmz_surface = tmz_surface;
3817 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3818 plane_size->surface_size.x = 0;
3819 plane_size->surface_size.y = 0;
3820 plane_size->surface_size.width = fb->width;
3821 plane_size->surface_size.height = fb->height;
3822 plane_size->surface_pitch =
3823 fb->pitches[0] / fb->format->cpp[0];
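/*
 * Example (illustrative figures): a 1920-wide XRGB8888 framebuffer
 * with pitches[0] = 7680 bytes and cpp[0] = 4 bytes per pixel gives a
 * surface pitch of 7680 / 4 = 1920 pixels.
 */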
3825 address->type = PLN_ADDR_TYPE_GRAPHICS;
3826 address->grph.addr.low_part = lower_32_bits(afb->address);
3827 address->grph.addr.high_part = upper_32_bits(afb->address);
3828 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3829 uint64_t chroma_addr = afb->address + fb->offsets[1];
3831 plane_size->surface_size.x = 0;
3832 plane_size->surface_size.y = 0;
3833 plane_size->surface_size.width = fb->width;
3834 plane_size->surface_size.height = fb->height;
3835 plane_size->surface_pitch =
3836 fb->pitches[0] / fb->format->cpp[0];
3838 plane_size->chroma_size.x = 0;
3839 plane_size->chroma_size.y = 0;
3840 /* TODO: set these based on surface format */
3841 plane_size->chroma_size.width = fb->width / 2;
3842 plane_size->chroma_size.height = fb->height / 2;
3844 plane_size->chroma_pitch =
3845 fb->pitches[1] / fb->format->cpp[1];
3847 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3848 address->video_progressive.luma_addr.low_part =
3849 lower_32_bits(afb->address);
3850 address->video_progressive.luma_addr.high_part =
3851 upper_32_bits(afb->address);
3852 address->video_progressive.chroma_addr.low_part =
3853 lower_32_bits(chroma_addr);
3854 address->video_progressive.chroma_addr.high_part =
3855 upper_32_bits(chroma_addr);
3858 /* Fill GFX8 params */
3859 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3860 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3862 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3863 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3864 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3865 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3866 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3868 /* XXX fix me for VI */
3869 tiling_info->gfx8.num_banks = num_banks;
3870 tiling_info->gfx8.array_mode =
3871 DC_ARRAY_2D_TILED_THIN1;
3872 tiling_info->gfx8.tile_split = tile_split;
3873 tiling_info->gfx8.bank_width = bankw;
3874 tiling_info->gfx8.bank_height = bankh;
3875 tiling_info->gfx8.tile_aspect = mtaspect;
3876 tiling_info->gfx8.tile_mode =
3877 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3878 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3879 == DC_ARRAY_1D_TILED_THIN1) {
3880 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3883 tiling_info->gfx8.pipe_config =
3884 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3886 if (adev->asic_type == CHIP_VEGA10 ||
3887 adev->asic_type == CHIP_VEGA12 ||
3888 adev->asic_type == CHIP_VEGA20 ||
3889 adev->asic_type == CHIP_NAVI10 ||
3890 adev->asic_type == CHIP_NAVI14 ||
3891 adev->asic_type == CHIP_NAVI12 ||
3892 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3893 adev->asic_type == CHIP_SIENNA_CICHLID ||
3894 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3896 adev->asic_type == CHIP_RENOIR ||
3897 adev->asic_type == CHIP_RAVEN) {
3898 /* Fill GFX9 params */
3899 tiling_info->gfx9.num_pipes =
3900 adev->gfx.config.gb_addr_config_fields.num_pipes;
3901 tiling_info->gfx9.num_banks =
3902 adev->gfx.config.gb_addr_config_fields.num_banks;
3903 tiling_info->gfx9.pipe_interleave =
3904 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3905 tiling_info->gfx9.num_shader_engines =
3906 adev->gfx.config.gb_addr_config_fields.num_se;
3907 tiling_info->gfx9.max_compressed_frags =
3908 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3909 tiling_info->gfx9.num_rb_per_se =
3910 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3911 tiling_info->gfx9.swizzle =
3912 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3913 tiling_info->gfx9.shaderEnable = 1;
3915 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3916 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3917 adev->asic_type == CHIP_NAVY_FLOUNDER)
3918 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3920 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3921 plane_size, tiling_info,
3922 tiling_flags, dcc, address,
3932 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3933 bool *per_pixel_alpha, bool *global_alpha,
3934 int *global_alpha_value)
3936 *per_pixel_alpha = false;
3937 *global_alpha = false;
3938 *global_alpha_value = 0xff;
3940 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3943 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3944 static const uint32_t alpha_formats[] = {
3945 DRM_FORMAT_ARGB8888,
3946 DRM_FORMAT_RGBA8888,
3947 DRM_FORMAT_ABGR8888,
3949 uint32_t format = plane_state->fb->format->format;
3952 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3953 if (format == alpha_formats[i]) {
3954 *per_pixel_alpha = true;
3960 if (plane_state->alpha < 0xffff) {
3961 *global_alpha = true;
3962 *global_alpha_value = plane_state->alpha >> 8;
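/*
 * Note (illustrative): DRM plane alpha is 16-bit (0..0xffff) and the
 * >> 8 above reduces it to 8 bits, e.g. 0x8000 becomes 0x80; a fully
 * opaque 0xffff skips global alpha blending entirely.
 */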
3967 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3968 const enum surface_pixel_format format,
3969 enum dc_color_space *color_space)
3973 *color_space = COLOR_SPACE_SRGB;
3975 /* DRM color properties only affect non-RGB formats. */
3976 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3979 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3981 switch (plane_state->color_encoding) {
3982 case DRM_COLOR_YCBCR_BT601:
3984 *color_space = COLOR_SPACE_YCBCR601;
3986 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3989 case DRM_COLOR_YCBCR_BT709:
3991 *color_space = COLOR_SPACE_YCBCR709;
3993 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3996 case DRM_COLOR_YCBCR_BT2020:
3998 *color_space = COLOR_SPACE_2020_YCBCR;
4011 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4012 const struct drm_plane_state *plane_state,
4013 const uint64_t tiling_flags,
4014 struct dc_plane_info *plane_info,
4015 struct dc_plane_address *address,
4017 bool force_disable_dcc)
4019 const struct drm_framebuffer *fb = plane_state->fb;
4020 const struct amdgpu_framebuffer *afb =
4021 to_amdgpu_framebuffer(plane_state->fb);
4022 struct drm_format_name_buf format_name;
4025 memset(plane_info, 0, sizeof(*plane_info));
4027 switch (fb->format->format) {
4029 plane_info->format =
4030 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4032 case DRM_FORMAT_RGB565:
4033 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4035 case DRM_FORMAT_XRGB8888:
4036 case DRM_FORMAT_ARGB8888:
4037 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4039 case DRM_FORMAT_XRGB2101010:
4040 case DRM_FORMAT_ARGB2101010:
4041 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4043 case DRM_FORMAT_XBGR2101010:
4044 case DRM_FORMAT_ABGR2101010:
4045 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4047 case DRM_FORMAT_XBGR8888:
4048 case DRM_FORMAT_ABGR8888:
4049 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4051 case DRM_FORMAT_NV21:
4052 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4054 case DRM_FORMAT_NV12:
4055 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4057 case DRM_FORMAT_P010:
4058 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4060 case DRM_FORMAT_XRGB16161616F:
4061 case DRM_FORMAT_ARGB16161616F:
4062 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4064 case DRM_FORMAT_XBGR16161616F:
4065 case DRM_FORMAT_ABGR16161616F:
4066 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4070 "Unsupported screen format %s\n",
4071 drm_get_format_name(fb->format->format, &format_name));
4075 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4076 case DRM_MODE_ROTATE_0:
4077 plane_info->rotation = ROTATION_ANGLE_0;
4079 case DRM_MODE_ROTATE_90:
4080 plane_info->rotation = ROTATION_ANGLE_90;
4082 case DRM_MODE_ROTATE_180:
4083 plane_info->rotation = ROTATION_ANGLE_180;
4085 case DRM_MODE_ROTATE_270:
4086 plane_info->rotation = ROTATION_ANGLE_270;
4089 plane_info->rotation = ROTATION_ANGLE_0;
4093 plane_info->visible = true;
4094 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4096 plane_info->layer_index = 0;
4098 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4099 &plane_info->color_space);
4103 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4104 plane_info->rotation, tiling_flags,
4105 &plane_info->tiling_info,
4106 &plane_info->plane_size,
4107 &plane_info->dcc, address, tmz_surface,
4112 fill_blending_from_plane_state(
4113 plane_state, &plane_info->per_pixel_alpha,
4114 &plane_info->global_alpha, &plane_info->global_alpha_value);
4119 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4120 struct dc_plane_state *dc_plane_state,
4121 struct drm_plane_state *plane_state,
4122 struct drm_crtc_state *crtc_state)
4124 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4125 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4126 struct dc_scaling_info scaling_info;
4127 struct dc_plane_info plane_info;
4129 bool force_disable_dcc = false;
4131 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4135 dc_plane_state->src_rect = scaling_info.src_rect;
4136 dc_plane_state->dst_rect = scaling_info.dst_rect;
4137 dc_plane_state->clip_rect = scaling_info.clip_rect;
4138 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4140 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4141 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4142 dm_plane_state->tiling_flags,
4144 &dc_plane_state->address,
4145 dm_plane_state->tmz_surface,
4150 dc_plane_state->format = plane_info.format;
4151 dc_plane_state->color_space = plane_info.color_space;
4153 dc_plane_state->plane_size = plane_info.plane_size;
4154 dc_plane_state->rotation = plane_info.rotation;
4155 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4156 dc_plane_state->stereo_format = plane_info.stereo_format;
4157 dc_plane_state->tiling_info = plane_info.tiling_info;
4158 dc_plane_state->visible = plane_info.visible;
4159 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4160 dc_plane_state->global_alpha = plane_info.global_alpha;
4161 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4162 dc_plane_state->dcc = plane_info.dcc;
4163 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4166 * Always set input transfer function, since plane state is refreshed
4169 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4176 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4177 const struct dm_connector_state *dm_state,
4178 struct dc_stream_state *stream)
4180 enum amdgpu_rmx_type rmx_type;
4182 struct rect src = { 0 }; /* viewport in composition space */
4183 struct rect dst = { 0 }; /* stream addressable area */
4185 /* no mode. nothing to be done */
4189 /* Full screen scaling by default */
4190 src.width = mode->hdisplay;
4191 src.height = mode->vdisplay;
4192 dst.width = stream->timing.h_addressable;
4193 dst.height = stream->timing.v_addressable;
4196 rmx_type = dm_state->scaling;
4197 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4198 if (src.width * dst.height <
4199 src.height * dst.width) {
4200 /* height needs less upscaling/more downscaling */
4201 dst.width = src.width *
4202 dst.height / src.height;
4204 /* width needs less upscaling/more downscaling */
4205 dst.height = src.height *
4206 dst.width / src.width;
4208 } else if (rmx_type == RMX_CENTER) {
4212 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4213 dst.y = (stream->timing.v_addressable - dst.height) / 2;
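/*
 * Worked example (illustrative): scaling a 1280x1024 mode onto a
 * 1920x1080 stream with RMX_ASPECT gives 1280*1080 < 1024*1920, so
 * dst.width = 1280 * 1080 / 1024 = 1350, and the centering above
 * yields dst.x = (1920 - 1350) / 2 = 285, i.e. a pillarboxed image.
 */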
4215 if (dm_state->underscan_enable) {
4216 dst.x += dm_state->underscan_hborder / 2;
4217 dst.y += dm_state->underscan_vborder / 2;
4218 dst.width -= dm_state->underscan_hborder;
4219 dst.height -= dm_state->underscan_vborder;
4226 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4227 dst.x, dst.y, dst.width, dst.height);
4231 static enum dc_color_depth
4232 convert_color_depth_from_display_info(const struct drm_connector *connector,
4233 bool is_y420, int requested_bpc)
4240 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4241 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4243 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4245 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4248 bpc = (uint8_t)connector->display_info.bpc;
4249 /* Assume 8 bpc by default if no bpc is specified. */
4250 bpc = bpc ? bpc : 8;
4253 if (requested_bpc > 0) {
4255 * Cap display bpc based on the user requested value.
4257 * The value for state->max_bpc may not be correctly updated
4258 * depending on when the connector gets added to the state
4259 * or if this was called outside of atomic check, so it
4260 * can't be used directly.
4262 bpc = min_t(u8, bpc, requested_bpc);
4264 /* Round down to the nearest even number. */
4265 bpc = bpc - (bpc & 1);
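/*
 * Example (illustrative): if the sink reports 12 bpc but the user
 * requested 11, min_t() picks 11 and the rounding above drops it to
 * 10, since only even bpc values map onto the dc color depths
 * (6/8/10/12/14/16) returned below.
 */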
4271 * Temporary workaround: DRM doesn't parse color depth for
4272 * EDID revisions before 1.4.
4273 * TODO: Fix EDID parsing.
4275 return COLOR_DEPTH_888;
4277 return COLOR_DEPTH_666;
4279 return COLOR_DEPTH_888;
4281 return COLOR_DEPTH_101010;
4283 return COLOR_DEPTH_121212;
4285 return COLOR_DEPTH_141414;
4287 return COLOR_DEPTH_161616;
4289 return COLOR_DEPTH_UNDEFINED;
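/*
 * Worked example (editorial sketch): a sink whose EDID reports bpc = 10
 * while userspace requested max bpc = 8 yields min(10, 8) = 8, already
 * even, so the switch above maps it to COLOR_DEPTH_888. A 6 bpc panel
 * with no user cap maps to COLOR_DEPTH_666.
 */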
4293 static enum dc_aspect_ratio
4294 get_aspect_ratio(const struct drm_display_mode *mode_in)
4296 /* 1-1 mapping, since both enums follow the HDMI spec. */
4297 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4300 static enum dc_color_space
4301 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4303 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4305 switch (dc_crtc_timing->pixel_encoding) {
4306 case PIXEL_ENCODING_YCBCR422:
4307 case PIXEL_ENCODING_YCBCR444:
4308 case PIXEL_ENCODING_YCBCR420:
4311 * 27.03 MHz (270300 in 100 Hz units) is the separation point between
4312 * HDTV and SDTV; per the HDMI spec we use YCbCr709 above it and YCbCr601 below
4315 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4316 if (dc_crtc_timing->flags.Y_ONLY)
4318 COLOR_SPACE_YCBCR709_LIMITED;
4320 color_space = COLOR_SPACE_YCBCR709;
4322 if (dc_crtc_timing->flags.Y_ONLY)
4324 COLOR_SPACE_YCBCR601_LIMITED;
4326 color_space = COLOR_SPACE_YCBCR601;
4331 case PIXEL_ENCODING_RGB:
4332 color_space = COLOR_SPACE_SRGB;
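/*
 * Worked example (editorial sketch): 1080p60 YCbCr 4:4:4 has a 148.5 MHz
 * pixel clock, i.e. pix_clk_100hz = 1485000 > 270300, so it is treated
 * as HDTV and assigned COLOR_SPACE_YCBCR709 (or the _LIMITED variant
 * when the Y_ONLY flag is set).
 */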
4343 static bool adjust_colour_depth_from_display_info(
4344 struct dc_crtc_timing *timing_out,
4345 const struct drm_display_info *info)
4347 enum dc_color_depth depth = timing_out->display_color_depth;
4350 normalized_clk = timing_out->pix_clk_100hz / 10;
4351 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4352 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4353 normalized_clk /= 2;
4354 /* Adjust the pixel clock per the HDMI spec based on colour depth */
4356 case COLOR_DEPTH_888:
4358 case COLOR_DEPTH_101010:
4359 normalized_clk = (normalized_clk * 30) / 24;
4361 case COLOR_DEPTH_121212:
4362 normalized_clk = (normalized_clk * 36) / 24;
4364 case COLOR_DEPTH_161616:
4365 normalized_clk = (normalized_clk * 48) / 24;
4368 /* The above depths are the only ones valid for HDMI. */
4371 if (normalized_clk <= info->max_tmds_clock) {
4372 timing_out->display_color_depth = depth;
4375 } while (--depth > COLOR_DEPTH_666);
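/*
 * Worked example (editorial sketch, assuming info->max_tmds_clock is
 * 600000 kHz): 4k60 RGB has pix_clk_100hz = 5940000, so normalized_clk
 * starts at 594000. 12 bpc scales it to 594000 * 36 / 24 = 891000 and
 * 10 bpc to 742500, both over the limit, so the loop settles on 8 bpc,
 * the deepest colour the link can actually carry.
 */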
4379 static void fill_stream_properties_from_drm_display_mode(
4380 struct dc_stream_state *stream,
4381 const struct drm_display_mode *mode_in,
4382 const struct drm_connector *connector,
4383 const struct drm_connector_state *connector_state,
4384 const struct dc_stream_state *old_stream,
4387 struct dc_crtc_timing *timing_out = &stream->timing;
4388 const struct drm_display_info *info = &connector->display_info;
4389 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4390 struct hdmi_vendor_infoframe hv_frame;
4391 struct hdmi_avi_infoframe avi_frame;
4393 memset(&hv_frame, 0, sizeof(hv_frame));
4394 memset(&avi_frame, 0, sizeof(avi_frame));
4396 timing_out->h_border_left = 0;
4397 timing_out->h_border_right = 0;
4398 timing_out->v_border_top = 0;
4399 timing_out->v_border_bottom = 0;
4400 /* TODO: un-hardcode */
4401 if (drm_mode_is_420_only(info, mode_in)
4402 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4403 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4404 else if (drm_mode_is_420_also(info, mode_in)
4405 && aconnector->force_yuv420_output)
4406 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4407 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4408 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4409 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4411 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4413 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4414 timing_out->display_color_depth = convert_color_depth_from_display_info(
4416 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4418 timing_out->scan_type = SCANNING_TYPE_NODATA;
4419 timing_out->hdmi_vic = 0;
4422 timing_out->vic = old_stream->timing.vic;
4423 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4424 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4426 timing_out->vic = drm_match_cea_mode(mode_in);
4427 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4428 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4429 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4430 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4433 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4434 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4435 timing_out->vic = avi_frame.video_code;
4436 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4437 timing_out->hdmi_vic = hv_frame.vic;
4440 timing_out->h_addressable = mode_in->crtc_hdisplay;
4441 timing_out->h_total = mode_in->crtc_htotal;
4442 timing_out->h_sync_width =
4443 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4444 timing_out->h_front_porch =
4445 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4446 timing_out->v_total = mode_in->crtc_vtotal;
4447 timing_out->v_addressable = mode_in->crtc_vdisplay;
4448 timing_out->v_front_porch =
4449 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4450 timing_out->v_sync_width =
4451 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4452 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4453 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4455 stream->output_color_space = get_output_color_space(timing_out);
4457 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4458 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4459 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4460 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4461 drm_mode_is_420_also(info, mode_in) &&
4462 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4463 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4464 adjust_colour_depth_from_display_info(timing_out, info);
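/*
 * Worked example (editorial sketch): for CEA 1920x1080@60 with
 * crtc_hsync_start = 2008, crtc_hsync_end = 2052 and crtc_clock =
 * 148500, the assignments above yield h_front_porch = 2008 - 1920 = 88,
 * h_sync_width = 2052 - 2008 = 44 and pix_clk_100hz = 1485000.
 */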
4469 static void fill_audio_info(struct audio_info *audio_info,
4470 const struct drm_connector *drm_connector,
4471 const struct dc_sink *dc_sink)
4474 int cea_revision = 0;
4475 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4477 audio_info->manufacture_id = edid_caps->manufacturer_id;
4478 audio_info->product_id = edid_caps->product_id;
4480 cea_revision = drm_connector->display_info.cea_rev;
4482 strscpy(audio_info->display_name,
4483 edid_caps->display_name,
4484 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4486 if (cea_revision >= 3) {
4487 audio_info->mode_count = edid_caps->audio_mode_count;
4489 for (i = 0; i < audio_info->mode_count; ++i) {
4490 audio_info->modes[i].format_code =
4491 (enum audio_format_code)
4492 (edid_caps->audio_modes[i].format_code);
4493 audio_info->modes[i].channel_count =
4494 edid_caps->audio_modes[i].channel_count;
4495 audio_info->modes[i].sample_rates.all =
4496 edid_caps->audio_modes[i].sample_rate;
4497 audio_info->modes[i].sample_size =
4498 edid_caps->audio_modes[i].sample_size;
4502 audio_info->flags.all = edid_caps->speaker_flags;
4504 /* TODO: We only check for progressive mode; check for interlaced mode too */
4505 if (drm_connector->latency_present[0]) {
4506 audio_info->video_latency = drm_connector->video_latency[0];
4507 audio_info->audio_latency = drm_connector->audio_latency[0];
4510 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4515 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4516 struct drm_display_mode *dst_mode)
4518 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4519 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4520 dst_mode->crtc_clock = src_mode->crtc_clock;
4521 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4522 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4523 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4524 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4525 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4526 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4527 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4528 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4529 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4530 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4531 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4535 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4536 const struct drm_display_mode *native_mode,
4539 if (scale_enabled) {
4540 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4541 } else if (native_mode->clock == drm_mode->clock &&
4542 native_mode->htotal == drm_mode->htotal &&
4543 native_mode->vtotal == drm_mode->vtotal) {
4544 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4546 /* no scaling and no amdgpu-inserted mode, no need to patch */
4550 static struct dc_sink *
4551 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4553 struct dc_sink_init_data sink_init_data = { 0 };
4554 struct dc_sink *sink = NULL;
4555 sink_init_data.link = aconnector->dc_link;
4556 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4558 sink = dc_sink_create(&sink_init_data);
4560 DRM_ERROR("Failed to create sink!\n");
4563 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4568 static void set_multisync_trigger_params(
4569 struct dc_stream_state *stream)
4571 if (stream->triggered_crtc_reset.enabled) {
4572 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4573 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4577 static void set_master_stream(struct dc_stream_state *stream_set[],
4580 int j, highest_rfr = 0, master_stream = 0;
4582 for (j = 0; j < stream_count; j++) {
4583 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4584 int refresh_rate = 0;
4586 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4587 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4588 if (refresh_rate > highest_rfr) {
4589 highest_rfr = refresh_rate;
4594 for (j = 0; j < stream_count; j++) {
4596 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
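/*
 * Worked example (editorial sketch): a 1080p60 stream has
 * pix_clk_100hz = 1485000 and h_total * v_total = 2200 * 1125, so
 * refresh_rate = 148500000 / 2475000 = 60; the stream with the highest
 * such rate becomes the master the others reset against.
 */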
4600 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4604 if (context->stream_count < 2)
4606 for (i = 0; i < context->stream_count ; i++) {
4607 if (!context->streams[i])
4610 * TODO: add a function to read AMD VSDB bits and set
4611 * crtc_sync_master.multi_sync_enabled flag.
4612 * For now it's set to false.
4614 set_multisync_trigger_params(context->streams[i]);
4616 set_master_stream(context->streams, context->stream_count);
4619 static struct dc_stream_state *
4620 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4621 const struct drm_display_mode *drm_mode,
4622 const struct dm_connector_state *dm_state,
4623 const struct dc_stream_state *old_stream,
4626 struct drm_display_mode *preferred_mode = NULL;
4627 struct drm_connector *drm_connector;
4628 const struct drm_connector_state *con_state =
4629 dm_state ? &dm_state->base : NULL;
4630 struct dc_stream_state *stream = NULL;
4631 struct drm_display_mode mode = *drm_mode;
4632 bool native_mode_found = false;
4633 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4635 int preferred_refresh = 0;
4636 #if defined(CONFIG_DRM_AMD_DC_DCN)
4637 struct dsc_dec_dpcd_caps dsc_caps;
4639 uint32_t link_bandwidth_kbps;
4641 struct dc_sink *sink = NULL;
4642 if (aconnector == NULL) {
4643 DRM_ERROR("aconnector is NULL!\n");
4647 drm_connector = &aconnector->base;
4649 if (!aconnector->dc_sink) {
4650 sink = create_fake_sink(aconnector);
4654 sink = aconnector->dc_sink;
4655 dc_sink_retain(sink);
4658 stream = dc_create_stream_for_sink(sink);
4660 if (stream == NULL) {
4661 DRM_ERROR("Failed to create stream for sink!\n");
4665 stream->dm_stream_context = aconnector;
4667 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4668 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4670 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4671 /* Search for preferred mode */
4672 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4673 native_mode_found = true;
4677 if (!native_mode_found)
4678 preferred_mode = list_first_entry_or_null(
4679 &aconnector->base.modes,
4680 struct drm_display_mode,
4683 mode_refresh = drm_mode_vrefresh(&mode);
4685 if (preferred_mode == NULL) {
4687 * This may not be an error; the use case is when we have no
4688 * usermode calls to reset and set the mode upon hotplug. In this
4689 * case, we call set mode ourselves to restore the previous mode,
4690 * and the mode list may not be populated in time.
4692 DRM_DEBUG_DRIVER("No preferred mode found\n");
4694 decide_crtc_timing_for_drm_display_mode(
4695 &mode, preferred_mode,
4696 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4697 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4701 drm_mode_set_crtcinfo(&mode, 0);
4704 * If scaling is enabled and the refresh rate didn't change,
4705 * we copy the vic and polarities of the old timings
4707 if (!scale || mode_refresh != preferred_refresh)
4708 fill_stream_properties_from_drm_display_mode(stream,
4709 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4711 fill_stream_properties_from_drm_display_mode(stream,
4712 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4714 stream->timing.flags.DSC = 0;
4716 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4717 #if defined(CONFIG_DRM_AMD_DC_DCN)
4718 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4719 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4720 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4723 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4724 dc_link_get_link_cap(aconnector->dc_link));
4726 #if defined(CONFIG_DRM_AMD_DC_DCN)
4727 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4728 /* Set DSC policy according to dsc_clock_en */
4729 dc_dsc_policy_set_enable_dsc_when_not_needed(
4730 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4732 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4734 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4735 link_bandwidth_kbps,
4737 &stream->timing.dsc_cfg))
4738 stream->timing.flags.DSC = 1;
4739 /* Overwrite the stream flag if DSC is enabled through debugfs */
4740 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4741 stream->timing.flags.DSC = 1;
4743 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4744 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4746 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4747 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4749 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4750 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4755 update_stream_scaling_settings(&mode, dm_state, stream);
4758 &stream->audio_info,
4762 update_stream_signal(stream, sink);
4764 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4765 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4767 if (stream->link->psr_settings.psr_feature_enabled) {
4769 // Decide whether the stream supports VSC SDP colorimetry
4770 // before building the VSC info packet
4772 stream->use_vsc_sdp_for_colorimetry = false;
4773 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4774 stream->use_vsc_sdp_for_colorimetry =
4775 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4777 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4778 stream->use_vsc_sdp_for_colorimetry = true;
4780 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4783 dc_sink_release(sink);
4788 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4790 drm_crtc_cleanup(crtc);
4794 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4795 struct drm_crtc_state *state)
4797 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4799 /* TODO: destroy dc_stream objects once the stream object is flattened */
4801 dc_stream_release(cur->stream);
4804 __drm_atomic_helper_crtc_destroy_state(state);
4810 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4812 struct dm_crtc_state *state;
4815 dm_crtc_destroy_state(crtc, crtc->state);
4817 state = kzalloc(sizeof(*state), GFP_KERNEL);
4818 if (WARN_ON(!state))
4821 __drm_atomic_helper_crtc_reset(crtc, &state->base);
4824 static struct drm_crtc_state *
4825 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4827 struct dm_crtc_state *state, *cur;
4829 cur = to_dm_crtc_state(crtc->state);
4831 if (WARN_ON(!crtc->state))
4834 state = kzalloc(sizeof(*state), GFP_KERNEL);
4838 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4841 state->stream = cur->stream;
4842 dc_stream_retain(state->stream);
4845 state->active_planes = cur->active_planes;
4846 state->vrr_infopacket = cur->vrr_infopacket;
4847 state->abm_level = cur->abm_level;
4848 state->vrr_supported = cur->vrr_supported;
4849 state->freesync_config = cur->freesync_config;
4850 state->crc_src = cur->crc_src;
4851 state->cm_has_degamma = cur->cm_has_degamma;
4852 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4854 /* TODO: duplicate dc_stream once the stream object is flattened */
4856 return &state->base;
4859 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4861 enum dc_irq_source irq_source;
4862 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4863 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4866 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4868 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4870 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4871 acrtc->crtc_id, enable ? "en" : "dis", rc);
4875 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4877 enum dc_irq_source irq_source;
4878 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4879 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4880 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4884 /* vblank irq on -> Only need vupdate irq in vrr mode */
4885 if (amdgpu_dm_vrr_active(acrtc_state))
4886 rc = dm_set_vupdate_irq(crtc, true);
4888 /* vblank irq off -> vupdate irq off */
4889 rc = dm_set_vupdate_irq(crtc, false);
4895 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4896 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4899 static int dm_enable_vblank(struct drm_crtc *crtc)
4901 return dm_set_vblank(crtc, true);
4904 static void dm_disable_vblank(struct drm_crtc *crtc)
4906 dm_set_vblank(crtc, false);
4909 /* Only the options currently available to the driver are implemented */
4910 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4911 .reset = dm_crtc_reset_state,
4912 .destroy = amdgpu_dm_crtc_destroy,
4913 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4914 .set_config = drm_atomic_helper_set_config,
4915 .page_flip = drm_atomic_helper_page_flip,
4916 .atomic_duplicate_state = dm_crtc_duplicate_state,
4917 .atomic_destroy_state = dm_crtc_destroy_state,
4918 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4919 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4920 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4921 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4922 .enable_vblank = dm_enable_vblank,
4923 .disable_vblank = dm_disable_vblank,
4924 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4927 static enum drm_connector_status
4928 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4931 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4935 * 1. This interface is NOT called in the context of an HPD irq.
4936 * 2. This interface *is* called in the context of a user-mode ioctl,
4937 * which makes it a bad place for *any* MST-related activity.
4940 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4941 !aconnector->fake_enable)
4942 connected = (aconnector->dc_sink != NULL);
4944 connected = (aconnector->base.force == DRM_FORCE_ON);
4946 update_subconnector_property(aconnector);
4948 return (connected ? connector_status_connected :
4949 connector_status_disconnected);
4952 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4953 struct drm_connector_state *connector_state,
4954 struct drm_property *property,
4957 struct drm_device *dev = connector->dev;
4958 struct amdgpu_device *adev = drm_to_adev(dev);
4959 struct dm_connector_state *dm_old_state =
4960 to_dm_connector_state(connector->state);
4961 struct dm_connector_state *dm_new_state =
4962 to_dm_connector_state(connector_state);
4966 if (property == dev->mode_config.scaling_mode_property) {
4967 enum amdgpu_rmx_type rmx_type;
4970 case DRM_MODE_SCALE_CENTER:
4971 rmx_type = RMX_CENTER;
4973 case DRM_MODE_SCALE_ASPECT:
4974 rmx_type = RMX_ASPECT;
4976 case DRM_MODE_SCALE_FULLSCREEN:
4977 rmx_type = RMX_FULL;
4979 case DRM_MODE_SCALE_NONE:
4985 if (dm_old_state->scaling == rmx_type)
4988 dm_new_state->scaling = rmx_type;
4990 } else if (property == adev->mode_info.underscan_hborder_property) {
4991 dm_new_state->underscan_hborder = val;
4993 } else if (property == adev->mode_info.underscan_vborder_property) {
4994 dm_new_state->underscan_vborder = val;
4996 } else if (property == adev->mode_info.underscan_property) {
4997 dm_new_state->underscan_enable = val;
4999 } else if (property == adev->mode_info.abm_level_property) {
5000 dm_new_state->abm_level = val;
5007 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5008 const struct drm_connector_state *state,
5009 struct drm_property *property,
5012 struct drm_device *dev = connector->dev;
5013 struct amdgpu_device *adev = drm_to_adev(dev);
5014 struct dm_connector_state *dm_state =
5015 to_dm_connector_state(state);
5018 if (property == dev->mode_config.scaling_mode_property) {
5019 switch (dm_state->scaling) {
5021 *val = DRM_MODE_SCALE_CENTER;
5024 *val = DRM_MODE_SCALE_ASPECT;
5027 *val = DRM_MODE_SCALE_FULLSCREEN;
5031 *val = DRM_MODE_SCALE_NONE;
5035 } else if (property == adev->mode_info.underscan_hborder_property) {
5036 *val = dm_state->underscan_hborder;
5038 } else if (property == adev->mode_info.underscan_vborder_property) {
5039 *val = dm_state->underscan_vborder;
5041 } else if (property == adev->mode_info.underscan_property) {
5042 *val = dm_state->underscan_enable;
5044 } else if (property == adev->mode_info.abm_level_property) {
5045 *val = dm_state->abm_level;
5052 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5054 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5056 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5059 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5061 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5062 const struct dc_link *link = aconnector->dc_link;
5063 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5064 struct amdgpu_display_manager *dm = &adev->dm;
5066 drm_atomic_private_obj_fini(&aconnector->mst_mgr.base);
5067 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5068 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5070 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5071 link->type != dc_connection_none &&
5072 dm->backlight_dev) {
5073 backlight_device_unregister(dm->backlight_dev);
5074 dm->backlight_dev = NULL;
5078 if (aconnector->dc_em_sink)
5079 dc_sink_release(aconnector->dc_em_sink);
5080 aconnector->dc_em_sink = NULL;
5081 if (aconnector->dc_sink)
5082 dc_sink_release(aconnector->dc_sink);
5083 aconnector->dc_sink = NULL;
5085 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5086 drm_connector_unregister(connector);
5087 drm_connector_cleanup(connector);
5088 if (aconnector->i2c) {
5089 i2c_del_adapter(&aconnector->i2c->base);
5090 kfree(aconnector->i2c);
5092 kfree(aconnector->dm_dp_aux.aux.name);
5097 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5099 struct dm_connector_state *state =
5100 to_dm_connector_state(connector->state);
5102 if (connector->state)
5103 __drm_atomic_helper_connector_destroy_state(connector->state);
5107 state = kzalloc(sizeof(*state), GFP_KERNEL);
5110 state->scaling = RMX_OFF;
5111 state->underscan_enable = false;
5112 state->underscan_hborder = 0;
5113 state->underscan_vborder = 0;
5114 state->base.max_requested_bpc = 8;
5115 state->vcpi_slots = 0;
5117 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5118 state->abm_level = amdgpu_dm_abm_level;
5120 __drm_atomic_helper_connector_reset(connector, &state->base);
5124 struct drm_connector_state *
5125 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5127 struct dm_connector_state *state =
5128 to_dm_connector_state(connector->state);
5130 struct dm_connector_state *new_state =
5131 kmemdup(state, sizeof(*state), GFP_KERNEL);
5136 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5138 new_state->freesync_capable = state->freesync_capable;
5139 new_state->abm_level = state->abm_level;
5140 new_state->scaling = state->scaling;
5141 new_state->underscan_enable = state->underscan_enable;
5142 new_state->underscan_hborder = state->underscan_hborder;
5143 new_state->underscan_vborder = state->underscan_vborder;
5144 new_state->vcpi_slots = state->vcpi_slots;
5145 new_state->pbn = state->pbn;
5146 return &new_state->base;
5150 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5152 struct amdgpu_dm_connector *amdgpu_dm_connector =
5153 to_amdgpu_dm_connector(connector);
5156 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5157 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5158 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5159 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5164 #if defined(CONFIG_DEBUG_FS)
5165 connector_debugfs_init(amdgpu_dm_connector);
5171 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5172 .reset = amdgpu_dm_connector_funcs_reset,
5173 .detect = amdgpu_dm_connector_detect,
5174 .fill_modes = drm_helper_probe_single_connector_modes,
5175 .destroy = amdgpu_dm_connector_destroy,
5176 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5177 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5178 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5179 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5180 .late_register = amdgpu_dm_connector_late_register,
5181 .early_unregister = amdgpu_dm_connector_unregister
5184 static int get_modes(struct drm_connector *connector)
5186 return amdgpu_dm_connector_get_modes(connector);
5189 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5191 struct dc_sink_init_data init_params = {
5192 .link = aconnector->dc_link,
5193 .sink_signal = SIGNAL_TYPE_VIRTUAL
5197 if (!aconnector->base.edid_blob_ptr) {
5198 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5199 aconnector->base.name);
5201 aconnector->base.force = DRM_FORCE_OFF;
5202 aconnector->base.override_edid = false;
5206 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5208 aconnector->edid = edid;
5210 aconnector->dc_em_sink = dc_link_add_remote_sink(
5211 aconnector->dc_link,
5213 (edid->extensions + 1) * EDID_LENGTH,
5216 if (aconnector->base.force == DRM_FORCE_ON) {
5217 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5218 aconnector->dc_link->local_sink :
5219 aconnector->dc_em_sink;
5220 dc_sink_retain(aconnector->dc_sink);
5224 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5226 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5229 * In case of a headless boot with force-on for a DP managed connector,
5230 * these settings have to be != 0 to get an initial modeset
5232 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5233 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5234 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5238 aconnector->base.override_edid = true;
5239 create_eml_sink(aconnector);
5242 static struct dc_stream_state *
5243 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5244 const struct drm_display_mode *drm_mode,
5245 const struct dm_connector_state *dm_state,
5246 const struct dc_stream_state *old_stream)
5248 struct drm_connector *connector = &aconnector->base;
5249 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5250 struct dc_stream_state *stream;
5251 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5252 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5253 enum dc_status dc_result = DC_OK;
5256 stream = create_stream_for_sink(aconnector, drm_mode,
5257 dm_state, old_stream,
5259 if (stream == NULL) {
5260 DRM_ERROR("Failed to create stream for sink!\n");
5264 dc_result = dc_validate_stream(adev->dm.dc, stream);
5266 if (dc_result != DC_OK) {
5267 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5272 dc_status_to_str(dc_result));
5274 dc_stream_release(stream);
5276 requested_bpc -= 2; /* lower bpc to retry validation */
5279 } while (stream == NULL && requested_bpc >= 6);
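/*
 * Worked example (editorial sketch): with max_requested_bpc = 10 the
 * loop validates at 10 bpc first; if dc_validate_stream() rejects it,
 * for instance for lack of link bandwidth, it retries at 8 and then
 * 6 bpc before giving up and returning NULL.
 */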
5284 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5285 struct drm_display_mode *mode)
5287 int result = MODE_ERROR;
5288 struct dc_sink *dc_sink;
5289 /* TODO: Unhardcode stream count */
5290 struct dc_stream_state *stream;
5291 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5293 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5294 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5298 * Only run this the first time mode_valid is called to initialize
5301 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5302 !aconnector->dc_em_sink)
5303 handle_edid_mgmt(aconnector);
5305 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5307 if (dc_sink == NULL) {
5308 DRM_ERROR("dc_sink is NULL!\n");
5312 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5314 dc_stream_release(stream);
5319 /* TODO: error handling*/
5323 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5324 struct dc_info_packet *out)
5326 struct hdmi_drm_infoframe frame;
5327 unsigned char buf[30]; /* 26 + 4 */
5331 memset(out, 0, sizeof(*out));
5333 if (!state->hdr_output_metadata)
5336 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5340 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5344 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5348 /* Prepare the infopacket for DC. */
5349 switch (state->connector->connector_type) {
5350 case DRM_MODE_CONNECTOR_HDMIA:
5351 out->hb0 = 0x87; /* type */
5352 out->hb1 = 0x01; /* version */
5353 out->hb2 = 0x1A; /* length */
5354 out->sb[0] = buf[3]; /* checksum */
5358 case DRM_MODE_CONNECTOR_DisplayPort:
5359 case DRM_MODE_CONNECTOR_eDP:
5360 out->hb0 = 0x00; /* sdp id, zero */
5361 out->hb1 = 0x87; /* type */
5362 out->hb2 = 0x1D; /* payload len - 1 */
5363 out->hb3 = (0x13 << 2); /* sdp version */
5364 out->sb[0] = 0x01; /* version */
5365 out->sb[1] = 0x1A; /* length */
5373 memcpy(&out->sb[i], &buf[4], 26);
5376 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5377 sizeof(out->sb), false);
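/*
 * Layout sketch (editorial): hdmi_drm_infoframe_pack_only() emits a
 * 4-byte header followed by the 26-byte static metadata payload:
 * buf[0] = type (0x87), buf[1] = version, buf[2] = length (26) and
 * buf[3] = checksum, which is why the HDMI path above copies buf[3]
 * into sb[0] and memcpy()s the 26 payload bytes from &buf[4].
 */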
5383 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5384 const struct drm_connector_state *new_state)
5386 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5387 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5389 if (old_blob != new_blob) {
5390 if (old_blob && new_blob &&
5391 old_blob->length == new_blob->length)
5392 return memcmp(old_blob->data, new_blob->data, old_blob->length);
5402 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5403 struct drm_atomic_state *state)
5405 struct drm_connector_state *new_con_state =
5406 drm_atomic_get_new_connector_state(state, conn);
5407 struct drm_connector_state *old_con_state =
5408 drm_atomic_get_old_connector_state(state, conn);
5409 struct drm_crtc *crtc = new_con_state->crtc;
5410 struct drm_crtc_state *new_crtc_state;
5416 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5417 struct dc_info_packet hdr_infopacket;
5419 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5423 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5424 if (IS_ERR(new_crtc_state))
5425 return PTR_ERR(new_crtc_state);
5428 * DC considers the stream backends changed if the
5429 * static metadata changes. Forcing the modeset also
5430 * gives a simple way for userspace to switch from
5431 * 8 bpc to 10 bpc when setting the metadata to enter HDR.
5434 * Changing the static metadata after it's been
5435 * set is permissible, however. So only force a
5436 * modeset if we're entering or exiting HDR.
5438 new_crtc_state->mode_changed =
5439 !old_con_state->hdr_output_metadata ||
5440 !new_con_state->hdr_output_metadata;
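/*
 * Example (editorial sketch): setting metadata for the first time
 * (old blob NULL, new blob present) or clearing it forces a modeset;
 * merely changing, say, the mastering luminance while already in HDR
 * leaves mode_changed false and is applied as a fast update.
 */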
5446 static const struct drm_connector_helper_funcs
5447 amdgpu_dm_connector_helper_funcs = {
5449 * If hotplugging a second, bigger display in fbcon mode, the bigger-resolution
5450 * modes will be filtered out by drm_mode_validate_size() and will be missing
5451 * after the user starts lightdm. So we need to renew the mode list in the
5452 * get_modes callback, not just return the mode count.
5454 .get_modes = get_modes,
5455 .mode_valid = amdgpu_dm_connector_mode_valid,
5456 .atomic_check = amdgpu_dm_connector_atomic_check,
5459 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5463 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5465 struct drm_atomic_state *state = new_crtc_state->state;
5466 struct drm_plane *plane;
5469 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5470 struct drm_plane_state *new_plane_state;
5472 /* Cursor planes are "fake". */
5473 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5476 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5478 if (!new_plane_state) {
5480 * The plane is enabled on the CRTC and hasn't changed
5481 * state. This means that it previously passed
5482 * validation and is therefore enabled.
5488 /* We need a framebuffer to be considered enabled. */
5489 num_active += (new_plane_state->fb != NULL);
5495 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5496 struct drm_crtc_state *new_crtc_state)
5498 struct dm_crtc_state *dm_new_crtc_state =
5499 to_dm_crtc_state(new_crtc_state);
5501 dm_new_crtc_state->active_planes = 0;
5503 if (!dm_new_crtc_state->stream)
5506 dm_new_crtc_state->active_planes =
5507 count_crtc_active_planes(new_crtc_state);
5510 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5511 struct drm_crtc_state *state)
5513 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5514 struct dc *dc = adev->dm.dc;
5515 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5518 dm_update_crtc_active_planes(crtc, state);
5520 if (unlikely(!dm_crtc_state->stream &&
5521 modeset_required(state, NULL, dm_crtc_state->stream))) {
5527 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5528 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5529 * planes are disabled, which is not supported by the hardware. And there is legacy
5530 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5532 if (state->enable &&
5533 !(state->plane_mask & drm_plane_mask(crtc->primary)))
5536 /* In some use cases, like reset, no stream is attached */
5537 if (!dm_crtc_state->stream)
5540 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5546 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5547 const struct drm_display_mode *mode,
5548 struct drm_display_mode *adjusted_mode)
5553 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5554 .disable = dm_crtc_helper_disable,
5555 .atomic_check = dm_crtc_helper_atomic_check,
5556 .mode_fixup = dm_crtc_helper_mode_fixup,
5557 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5560 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5565 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5567 switch (display_color_depth) {
5568 case COLOR_DEPTH_666:
5570 case COLOR_DEPTH_888:
5572 case COLOR_DEPTH_101010:
5574 case COLOR_DEPTH_121212:
5576 case COLOR_DEPTH_141414:
5578 case COLOR_DEPTH_161616:
5586 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5587 struct drm_crtc_state *crtc_state,
5588 struct drm_connector_state *conn_state)
5590 struct drm_atomic_state *state = crtc_state->state;
5591 struct drm_connector *connector = conn_state->connector;
5592 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5593 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5594 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5595 struct drm_dp_mst_topology_mgr *mst_mgr;
5596 struct drm_dp_mst_port *mst_port;
5597 enum dc_color_depth color_depth;
5599 bool is_y420 = false;
5601 if (!aconnector->port || !aconnector->dc_sink)
5604 mst_port = aconnector->port;
5605 mst_mgr = &aconnector->mst_port->mst_mgr;
5607 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5610 if (!state->duplicated) {
5611 int max_bpc = conn_state->max_requested_bpc;
5612 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5613 aconnector->force_yuv420_output;
5614 color_depth = convert_color_depth_from_display_info(connector,
5617 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5618 clock = adjusted_mode->clock;
5619 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5621 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5624 dm_new_connector_state->pbn,
5625 dm_mst_get_pbn_divider(aconnector->dc_link));
5626 if (dm_new_connector_state->vcpi_slots < 0) {
5627 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5628 return dm_new_connector_state->vcpi_slots;
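/*
 * Worked example (editorial sketch, approximate): 1080p60 at 8 bpc
 * gives bpp = 24 and clock = 148500 kHz, so
 * drm_dp_calc_pbn_mode(148500, 24, false) is 148500 * 24 / 8 kBps
 * scaled by 64/54 with a 0.6% margin, roughly 532 PBN. With
 * dm_mst_get_pbn_divider() returning 40 for an HBR2 x4 link, that
 * needs ceil(532 / 40) = 14 VCPI time slots.
 */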
5633 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5634 .disable = dm_encoder_helper_disable,
5635 .atomic_check = dm_encoder_helper_atomic_check
5638 #if defined(CONFIG_DRM_AMD_DC_DCN)
5639 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5640 struct dc_state *dc_state)
5642 struct dc_stream_state *stream = NULL;
5643 struct drm_connector *connector;
5644 struct drm_connector_state *new_con_state, *old_con_state;
5645 struct amdgpu_dm_connector *aconnector;
5646 struct dm_connector_state *dm_conn_state;
5647 int i, j, clock, bpp;
5648 int vcpi, pbn_div, pbn = 0;
5650 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5652 aconnector = to_amdgpu_dm_connector(connector);
5654 if (!aconnector->port)
5657 if (!new_con_state || !new_con_state->crtc)
5660 dm_conn_state = to_dm_connector_state(new_con_state);
5662 for (j = 0; j < dc_state->stream_count; j++) {
5663 stream = dc_state->streams[j];
5667 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5676 if (stream->timing.flags.DSC != 1) {
5677 drm_dp_mst_atomic_enable_dsc(state,
5685 pbn_div = dm_mst_get_pbn_divider(stream->link);
5686 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5687 clock = stream->timing.pix_clk_100hz / 10;
5688 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5689 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5696 dm_conn_state->pbn = pbn;
5697 dm_conn_state->vcpi_slots = vcpi;
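/*
 * Worked example (editorial sketch): dsc_cfg.bits_per_pixel is a
 * fixed-point value in 1/16 bpp units, e.g. 192 for a 12 bpp DSC
 * target, and passing true to drm_dp_calc_pbn_mode() divides the
 * sixteenths back out, so the recomputed PBN is roughly half the
 * uncompressed 24 bpp figure.
 */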
5703 static void dm_drm_plane_reset(struct drm_plane *plane)
5705 struct dm_plane_state *amdgpu_state = NULL;
5708 plane->funcs->atomic_destroy_state(plane, plane->state);
5710 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5711 WARN_ON(amdgpu_state == NULL);
5714 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5717 static struct drm_plane_state *
5718 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5720 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5722 old_dm_plane_state = to_dm_plane_state(plane->state);
5723 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5724 if (!dm_plane_state)
5727 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5729 if (old_dm_plane_state->dc_state) {
5730 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5731 dc_plane_state_retain(dm_plane_state->dc_state);
5734 /* Framebuffer hasn't been updated yet, so retain old flags. */
5735 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5736 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5738 return &dm_plane_state->base;
5741 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5742 struct drm_plane_state *state)
5744 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5746 if (dm_plane_state->dc_state)
5747 dc_plane_state_release(dm_plane_state->dc_state);
5749 drm_atomic_helper_plane_destroy_state(plane, state);
5752 static const struct drm_plane_funcs dm_plane_funcs = {
5753 .update_plane = drm_atomic_helper_update_plane,
5754 .disable_plane = drm_atomic_helper_disable_plane,
5755 .destroy = drm_primary_helper_destroy,
5756 .reset = dm_drm_plane_reset,
5757 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5758 .atomic_destroy_state = dm_drm_plane_destroy_state,
5761 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5762 struct drm_plane_state *new_state)
5764 struct amdgpu_framebuffer *afb;
5765 struct drm_gem_object *obj;
5766 struct amdgpu_device *adev;
5767 struct amdgpu_bo *rbo;
5768 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5769 struct list_head list;
5770 struct ttm_validate_buffer tv;
5771 struct ww_acquire_ctx ticket;
5775 if (!new_state->fb) {
5776 DRM_DEBUG_DRIVER("No FB bound\n");
5780 afb = to_amdgpu_framebuffer(new_state->fb);
5781 obj = new_state->fb->obj[0];
5782 rbo = gem_to_amdgpu_bo(obj);
5783 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5784 INIT_LIST_HEAD(&list);
5788 list_add(&tv.head, &list);
5790 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5792 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5796 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5797 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5799 domain = AMDGPU_GEM_DOMAIN_VRAM;
5801 r = amdgpu_bo_pin(rbo, domain);
5802 if (unlikely(r != 0)) {
5803 if (r != -ERESTARTSYS)
5804 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5805 ttm_eu_backoff_reservation(&ticket, &list);
5809 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5810 if (unlikely(r != 0)) {
5811 amdgpu_bo_unpin(rbo);
5812 ttm_eu_backoff_reservation(&ticket, &list);
5813 DRM_ERROR("%p bind failed\n", rbo);
5817 ttm_eu_backoff_reservation(&ticket, &list);
5819 afb->address = amdgpu_bo_gpu_offset(rbo);
5824 * We don't do surface updates on planes that have been newly created,
5825 * but we also don't have the afb->address during atomic check.
5827 * Fill in buffer attributes depending on the address here, but only on
5828 * newly created planes since they're not being used by DC yet and this
5829 * won't modify global state.
5831 dm_plane_state_old = to_dm_plane_state(plane->state);
5832 dm_plane_state_new = to_dm_plane_state(new_state);
5834 if (dm_plane_state_new->dc_state &&
5835 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5836 struct dc_plane_state *plane_state =
5837 dm_plane_state_new->dc_state;
5838 bool force_disable_dcc = !plane_state->dcc.enable;
5840 fill_plane_buffer_attributes(
5841 adev, afb, plane_state->format, plane_state->rotation,
5842 dm_plane_state_new->tiling_flags,
5843 &plane_state->tiling_info, &plane_state->plane_size,
5844 &plane_state->dcc, &plane_state->address,
5845 dm_plane_state_new->tmz_surface, force_disable_dcc);
5851 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5852 struct drm_plane_state *old_state)
5854 struct amdgpu_bo *rbo;
5860 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5861 r = amdgpu_bo_reserve(rbo, false);
5863 DRM_ERROR("failed to reserve rbo before unpin\n");
5867 amdgpu_bo_unpin(rbo);
5868 amdgpu_bo_unreserve(rbo);
5869 amdgpu_bo_unref(&rbo);
5872 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5873 struct drm_crtc_state *new_crtc_state)
5875 int max_downscale = 0;
5876 int max_upscale = INT_MAX;
5878 /* TODO: These should be checked against DC plane caps */
5879 return drm_atomic_helper_check_plane_state(
5880 state, new_crtc_state, max_downscale, max_upscale, true, true);
5883 static int dm_plane_atomic_check(struct drm_plane *plane,
5884 struct drm_plane_state *state)
5886 struct amdgpu_device *adev = drm_to_adev(plane->dev);
5887 struct dc *dc = adev->dm.dc;
5888 struct dm_plane_state *dm_plane_state;
5889 struct dc_scaling_info scaling_info;
5890 struct drm_crtc_state *new_crtc_state;
5893 dm_plane_state = to_dm_plane_state(state);
5895 if (!dm_plane_state->dc_state)
5899 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5900 if (!new_crtc_state)
5903 ret = dm_plane_helper_check_state(state, new_crtc_state);
5907 ret = fill_dc_scaling_info(state, &scaling_info);
5911 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5917 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5918 struct drm_plane_state *new_plane_state)
5920 /* Only support async updates on cursor planes. */
5921 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5927 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5928 struct drm_plane_state *new_state)
5930 struct drm_plane_state *old_state =
5931 drm_atomic_get_old_plane_state(new_state->state, plane);
5933 swap(plane->state->fb, new_state->fb);
5935 plane->state->src_x = new_state->src_x;
5936 plane->state->src_y = new_state->src_y;
5937 plane->state->src_w = new_state->src_w;
5938 plane->state->src_h = new_state->src_h;
5939 plane->state->crtc_x = new_state->crtc_x;
5940 plane->state->crtc_y = new_state->crtc_y;
5941 plane->state->crtc_w = new_state->crtc_w;
5942 plane->state->crtc_h = new_state->crtc_h;
5944 handle_cursor_update(plane, old_state);
5947 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5948 .prepare_fb = dm_plane_helper_prepare_fb,
5949 .cleanup_fb = dm_plane_helper_cleanup_fb,
5950 .atomic_check = dm_plane_atomic_check,
5951 .atomic_async_check = dm_plane_atomic_async_check,
5952 .atomic_async_update = dm_plane_atomic_async_update
5956 * TODO: these are currently initialized to rgb formats only.
5957 * For future use cases we should either initialize them dynamically based on
5958 * plane capabilities, or initialize this array to all formats, so the internal
5959 * drm check will succeed, and let DC implement the proper check
5961 static const uint32_t rgb_formats[] = {
5962 DRM_FORMAT_XRGB8888,
5963 DRM_FORMAT_ARGB8888,
5964 DRM_FORMAT_RGBA8888,
5965 DRM_FORMAT_XRGB2101010,
5966 DRM_FORMAT_XBGR2101010,
5967 DRM_FORMAT_ARGB2101010,
5968 DRM_FORMAT_ABGR2101010,
5969 DRM_FORMAT_XBGR8888,
5970 DRM_FORMAT_ABGR8888,
5974 static const uint32_t overlay_formats[] = {
5975 DRM_FORMAT_XRGB8888,
5976 DRM_FORMAT_ARGB8888,
5977 DRM_FORMAT_RGBA8888,
5978 DRM_FORMAT_XBGR8888,
5979 DRM_FORMAT_ABGR8888,
5983 static const u32 cursor_formats[] = {
5987 static int get_plane_formats(const struct drm_plane *plane,
5988 const struct dc_plane_cap *plane_cap,
5989 uint32_t *formats, int max_formats)
5991 int i, num_formats = 0;
5994 * TODO: Query support for each group of formats directly from
5995 * DC plane caps. This will require adding more formats to the caps list.
5999 switch (plane->type) {
6000 case DRM_PLANE_TYPE_PRIMARY:
6001 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6002 if (num_formats >= max_formats)
6005 formats[num_formats++] = rgb_formats[i];
6008 if (plane_cap && plane_cap->pixel_format_support.nv12)
6009 formats[num_formats++] = DRM_FORMAT_NV12;
6010 if (plane_cap && plane_cap->pixel_format_support.p010)
6011 formats[num_formats++] = DRM_FORMAT_P010;
6012 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6013 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6014 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6015 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6016 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6020 case DRM_PLANE_TYPE_OVERLAY:
6021 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6022 if (num_formats >= max_formats)
6025 formats[num_formats++] = overlay_formats[i];
6029 case DRM_PLANE_TYPE_CURSOR:
6030 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6031 if (num_formats >= max_formats)
6034 formats[num_formats++] = cursor_formats[i];
6042 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6043 struct drm_plane *plane,
6044 unsigned long possible_crtcs,
6045 const struct dc_plane_cap *plane_cap)
6047 uint32_t formats[32];
6050 unsigned int supported_rotations;
6052 num_formats = get_plane_formats(plane, plane_cap, formats,
6053 ARRAY_SIZE(formats));
6055 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6056 &dm_plane_funcs, formats, num_formats,
6057 NULL, plane->type, NULL);
6061 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6062 plane_cap && plane_cap->per_pixel_alpha) {
6063 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6064 BIT(DRM_MODE_BLEND_PREMULTI);
6066 drm_plane_create_alpha_property(plane);
6067 drm_plane_create_blend_mode_property(plane, blend_caps);
6070 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6072 (plane_cap->pixel_format_support.nv12 ||
6073 plane_cap->pixel_format_support.p010)) {
6074 /* This only affects YUV formats. */
6075 drm_plane_create_color_properties(
6077 BIT(DRM_COLOR_YCBCR_BT601) |
6078 BIT(DRM_COLOR_YCBCR_BT709) |
6079 BIT(DRM_COLOR_YCBCR_BT2020),
6080 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6081 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6082 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6085 supported_rotations =
6086 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6087 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6089 if (dm->adev->asic_type >= CHIP_BONAIRE)
6090 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6091 supported_rotations);
6093 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6095 /* Create (reset) the plane state */
6096 if (plane->funcs->reset)
6097 plane->funcs->reset(plane);
6102 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6103 struct drm_plane *plane,
6104 uint32_t crtc_index)
6106 struct amdgpu_crtc *acrtc = NULL;
6107 struct drm_plane *cursor_plane;
6111 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6115 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6116 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6118 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6122 res = drm_crtc_init_with_planes(
6127 &amdgpu_dm_crtc_funcs, NULL);
6132 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6134 /* Create (reset) the CRTC state */
6135 if (acrtc->base.funcs->reset)
6136 acrtc->base.funcs->reset(&acrtc->base);
6138 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6139 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6141 acrtc->crtc_id = crtc_index;
6142 acrtc->base.enabled = false;
6143 acrtc->otg_inst = -1;
6145 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6146 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6147 true, MAX_COLOR_LUT_ENTRIES);
6148 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6154 kfree(cursor_plane);
6159 static int to_drm_connector_type(enum signal_type st)
6162 case SIGNAL_TYPE_HDMI_TYPE_A:
6163 return DRM_MODE_CONNECTOR_HDMIA;
6164 case SIGNAL_TYPE_EDP:
6165 return DRM_MODE_CONNECTOR_eDP;
6166 case SIGNAL_TYPE_LVDS:
6167 return DRM_MODE_CONNECTOR_LVDS;
6168 case SIGNAL_TYPE_RGB:
6169 return DRM_MODE_CONNECTOR_VGA;
6170 case SIGNAL_TYPE_DISPLAY_PORT:
6171 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6172 return DRM_MODE_CONNECTOR_DisplayPort;
6173 case SIGNAL_TYPE_DVI_DUAL_LINK:
6174 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6175 return DRM_MODE_CONNECTOR_DVID;
6176 case SIGNAL_TYPE_VIRTUAL:
6177 return DRM_MODE_CONNECTOR_VIRTUAL;
6180 return DRM_MODE_CONNECTOR_Unknown;
6184 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6186 struct drm_encoder *encoder;
6188 /* There is only one encoder per connector */
6189 drm_connector_for_each_possible_encoder(connector, encoder)
6195 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6197 struct drm_encoder *encoder;
6198 struct amdgpu_encoder *amdgpu_encoder;
6200 encoder = amdgpu_dm_connector_to_encoder(connector);
6202 if (encoder == NULL)
6205 amdgpu_encoder = to_amdgpu_encoder(encoder);
6207 amdgpu_encoder->native_mode.clock = 0;
6209 if (!list_empty(&connector->probed_modes)) {
6210 struct drm_display_mode *preferred_mode = NULL;
6212 list_for_each_entry(preferred_mode,
6213 &connector->probed_modes,
6215 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6216 amdgpu_encoder->native_mode = *preferred_mode;
6224 static struct drm_display_mode *
6225 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6227 int hdisplay, int vdisplay)
6229 struct drm_device *dev = encoder->dev;
6230 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6231 struct drm_display_mode *mode = NULL;
6232 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6234 mode = drm_mode_duplicate(dev, native_mode);
6239 mode->hdisplay = hdisplay;
6240 mode->vdisplay = vdisplay;
6241 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6242 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6248 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6249 struct drm_connector *connector)
6251 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6252 struct drm_display_mode *mode = NULL;
6253 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6254 struct amdgpu_dm_connector *amdgpu_dm_connector =
6255 to_amdgpu_dm_connector(connector);
6259 char name[DRM_DISPLAY_MODE_LEN];
6262 } common_modes[] = {
6263 { "640x480", 640, 480},
6264 { "800x600", 800, 600},
6265 { "1024x768", 1024, 768},
6266 { "1280x720", 1280, 720},
6267 { "1280x800", 1280, 800},
6268 {"1280x1024", 1280, 1024},
6269 { "1440x900", 1440, 900},
6270 {"1680x1050", 1680, 1050},
6271 {"1600x1200", 1600, 1200},
6272 {"1920x1080", 1920, 1080},
6273 {"1920x1200", 1920, 1200}
6276 n = ARRAY_SIZE(common_modes);
6278 for (i = 0; i < n; i++) {
6279 struct drm_display_mode *curmode = NULL;
6280 bool mode_existed = false;
6282 if (common_modes[i].w > native_mode->hdisplay ||
6283 common_modes[i].h > native_mode->vdisplay ||
6284 (common_modes[i].w == native_mode->hdisplay &&
6285 common_modes[i].h == native_mode->vdisplay))
6288 list_for_each_entry(curmode, &connector->probed_modes, head) {
6289 if (common_modes[i].w == curmode->hdisplay &&
6290 common_modes[i].h == curmode->vdisplay) {
6291 mode_existed = true;
6299 mode = amdgpu_dm_create_common_mode(encoder,
6300 common_modes[i].name, common_modes[i].w,
6302 drm_mode_probed_add(connector, mode);
6303 amdgpu_dm_connector->num_modes++;
6307 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6310 struct amdgpu_dm_connector *amdgpu_dm_connector =
6311 to_amdgpu_dm_connector(connector);
6314 /* empty probed_modes */
6315 INIT_LIST_HEAD(&connector->probed_modes);
6316 amdgpu_dm_connector->num_modes =
6317 drm_add_edid_modes(connector, edid);
6319 /* Sort the probed modes before calling
6320 * amdgpu_dm_get_native_mode(), since an EDID can have
6321 * more than one preferred mode. Modes that appear
6322 * later in the probed mode list could be of a higher,
6323 * preferred resolution. For example, a 3840x2160 preferred
6324 * timing in the base EDID and a 4096x2160 preferred
6325 * resolution in a DID extension block later.
6327 drm_mode_sort(&connector->probed_modes);
6328 amdgpu_dm_get_native_mode(connector);
6330 amdgpu_dm_connector->num_modes = 0;
6334 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6336 struct amdgpu_dm_connector *amdgpu_dm_connector =
6337 to_amdgpu_dm_connector(connector);
6338 struct drm_encoder *encoder;
6339 struct edid *edid = amdgpu_dm_connector->edid;
6341 encoder = amdgpu_dm_connector_to_encoder(connector);
6343 if (!edid || !drm_edid_is_valid(edid)) {
6344 amdgpu_dm_connector->num_modes =
6345 drm_add_modes_noedid(connector, 640, 480);
6347 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6348 amdgpu_dm_connector_add_common_modes(encoder, connector);
6350 amdgpu_dm_fbc_init(connector);
6352 return amdgpu_dm_connector->num_modes;
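/*
 * Sketch of how this callback is wired up (for illustration; the struct
 * name below is assumed): the DRM core invokes .get_modes during connector
 * probing, e.g. from drm_helper_probe_single_connector_modes().
 */
#if 0
static const struct drm_connector_helper_funcs example_helper_funcs = {
	.get_modes = amdgpu_dm_connector_get_modes,
	/* other callbacks elided */
};
#endif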
6355 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6356 struct amdgpu_dm_connector *aconnector,
6358 struct dc_link *link,
6361 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6364 * Some of the properties below require access to state, like bpc.
6365 * Allocate some default initial connector state with our reset helper.
6367 if (aconnector->base.funcs->reset)
6368 aconnector->base.funcs->reset(&aconnector->base);
6370 aconnector->connector_id = link_index;
6371 aconnector->dc_link = link;
6372 aconnector->base.interlace_allowed = false;
6373 aconnector->base.doublescan_allowed = false;
6374 aconnector->base.stereo_allowed = false;
6375 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6376 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6377 aconnector->audio_inst = -1;
6378 mutex_init(&aconnector->hpd_lock);
/*
 * Configure HPD hot-plug support: connector->polled defaults to 0,
 * which means HPD hot plug is not supported.
 */
6384 switch (connector_type) {
6385 case DRM_MODE_CONNECTOR_HDMIA:
6386 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6387 aconnector->base.ycbcr_420_allowed =
6388 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6390 case DRM_MODE_CONNECTOR_DisplayPort:
6391 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6392 aconnector->base.ycbcr_420_allowed =
6393 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6395 case DRM_MODE_CONNECTOR_DVID:
6396 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6402 drm_object_attach_property(&aconnector->base.base,
6403 dm->ddev->mode_config.scaling_mode_property,
6404 DRM_MODE_SCALE_NONE);
6406 drm_object_attach_property(&aconnector->base.base,
6407 adev->mode_info.underscan_property,
6409 drm_object_attach_property(&aconnector->base.base,
6410 adev->mode_info.underscan_hborder_property,
6412 drm_object_attach_property(&aconnector->base.base,
6413 adev->mode_info.underscan_vborder_property,
6416 if (!aconnector->mst_port)
6417 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6419 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6420 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6421 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
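/*
 * Worked example of the defaults above: an HDMI connector gets the
 * property range [8, 16] with max_bpc defaulting to 8, so userspace must
 * explicitly raise "max bpc" to get 10/12-bit output, while an eDP panel
 * defaults to 16 and is limited only by the sink's capabilities.
 */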
6423 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6424 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6425 drm_object_attach_property(&aconnector->base.base,
6426 adev->mode_info.abm_level_property, 0);
6429 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6430 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6431 connector_type == DRM_MODE_CONNECTOR_eDP) {
6432 drm_object_attach_property(
6433 &aconnector->base.base,
6434 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6436 if (!aconnector->mst_port)
6437 drm_connector_attach_vrr_capable_property(&aconnector->base);
6439 #ifdef CONFIG_DRM_AMD_DC_HDCP
6440 if (adev->dm.hdcp_workqueue)
6441 drm_connector_attach_content_protection_property(&aconnector->base, true);
6446 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6447 struct i2c_msg *msgs, int num)
6449 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6450 struct ddc_service *ddc_service = i2c->ddc_service;
6451 struct i2c_command cmd;
6455 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6460 cmd.number_of_payloads = num;
6461 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6464 for (i = 0; i < num; i++) {
6465 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6466 cmd.payloads[i].address = msgs[i].addr;
6467 cmd.payloads[i].length = msgs[i].len;
6468 cmd.payloads[i].data = msgs[i].buf;
6472 ddc_service->ctx->dc,
6473 ddc_service->ddc_pin->hw_info.ddc_channel,
6477 kfree(cmd.payloads);
6481 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6483 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
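/*
 * Illustrative sketch (assumed values, not part of the driver): a DDC
 * EDID read as it would reach amdgpu_dm_i2c_xfer() via i2c_transfer().
 * The write message sets the EDID offset; the read message fetches one
 * 128-byte block from the sink at address 0x50. Each msg is translated
 * into one i2c_payload by the loop in amdgpu_dm_i2c_xfer().
 */
#if 0
static int example_edid_read(struct i2c_adapter *adap, u8 *buf)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },
	};

	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}
#endif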
6486 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6487 .master_xfer = amdgpu_dm_i2c_xfer,
6488 .functionality = amdgpu_dm_i2c_func,
6491 static struct amdgpu_i2c_adapter *
6492 create_i2c(struct ddc_service *ddc_service,
6496 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6497 struct amdgpu_i2c_adapter *i2c;
6499 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6502 i2c->base.owner = THIS_MODULE;
6503 i2c->base.class = I2C_CLASS_DDC;
6504 i2c->base.dev.parent = &adev->pdev->dev;
6505 i2c->base.algo = &amdgpu_dm_i2c_algo;
6506 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6507 i2c_set_adapdata(&i2c->base, i2c);
6508 i2c->ddc_service = ddc_service;
6509 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6516 * Note: this function assumes that dc_link_detect() was called for the
6517 * dc_link which will be represented by this aconnector.
6519 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6520 struct amdgpu_dm_connector *aconnector,
6521 uint32_t link_index,
6522 struct amdgpu_encoder *aencoder)
6526 struct dc *dc = dm->dc;
6527 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6528 struct amdgpu_i2c_adapter *i2c;
6530 link->priv = aconnector;
6532 DRM_DEBUG_DRIVER("%s()\n", __func__);
6534 i2c = create_i2c(link->ddc, link->link_index, &res);
6536 DRM_ERROR("Failed to create i2c adapter data\n");
6540 aconnector->i2c = i2c;
6541 res = i2c_add_adapter(&i2c->base);
6544 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6548 connector_type = to_drm_connector_type(link->connector_signal);
6550 res = drm_connector_init_with_ddc(
6553 &amdgpu_dm_connector_funcs,
6558 DRM_ERROR("connector_init failed\n");
6559 aconnector->connector_id = -1;
6563 drm_connector_helper_add(
6565 &amdgpu_dm_connector_helper_funcs);
6567 amdgpu_dm_connector_init_helper(
6574 drm_connector_attach_encoder(
6575 &aconnector->base, &aencoder->base);
6577 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6578 || connector_type == DRM_MODE_CONNECTOR_eDP)
6579 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6584 aconnector->i2c = NULL;
6589 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6591 switch (adev->mode_info.num_crtc) {
6608 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6609 struct amdgpu_encoder *aencoder,
6610 uint32_t link_index)
6612 struct amdgpu_device *adev = drm_to_adev(dev);
6614 int res = drm_encoder_init(dev,
6616 &amdgpu_dm_encoder_funcs,
6617 DRM_MODE_ENCODER_TMDS,
6620 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6623 aencoder->encoder_id = link_index;
6625 aencoder->encoder_id = -1;
6627 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6632 static void manage_dm_interrupts(struct amdgpu_device *adev,
6633 struct amdgpu_crtc *acrtc,
6637 * We have no guarantee that the frontend index maps to the same
6638 * backend index - some even map to more than one.
6640 * TODO: Use a different interrupt or check DC itself for the mapping.
6643 amdgpu_display_crtc_idx_to_irq_type(
6648 drm_crtc_vblank_on(&acrtc->base);
6651 &adev->pageflip_irq,
6657 &adev->pageflip_irq,
6659 drm_crtc_vblank_off(&acrtc->base);
6663 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6664 struct amdgpu_crtc *acrtc)
6667 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6670 * This reads the current state for the IRQ and force reapplies
6671 * the setting to hardware.
6673 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6677 is_scaling_state_different(const struct dm_connector_state *dm_state,
6678 const struct dm_connector_state *old_dm_state)
6680 if (dm_state->scaling != old_dm_state->scaling)
6682 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6683 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6685 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6686 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6688 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6689 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
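/*
 * Examples of the rules above (illustrative): turning underscan off only
 * counts as a scaling change if the old state had non-zero borders, and
 * turning it on only counts if the new state requests borders; a plain
 * border resize with underscan enabled on both sides is caught by the
 * final comparison.
 */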
6694 #ifdef CONFIG_DRM_AMD_DC_HDCP
6695 static bool is_content_protection_different(struct drm_connector_state *state,
6696 const struct drm_connector_state *old_state,
6697 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6699 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6701 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6702 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6703 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* CP is being re-enabled, ignore this */
6708 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6709 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6710 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
/* S3 resume case: the old state will always be 0 (UNDESIRED) while the restored state will be ENABLED */
6715 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6716 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6717 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* Check that something is actually connected and enabled; otherwise we would
 * start HDCP with nothing driving the display (hot-plug, headless S3, DPMS).
 */
6722 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6723 aconnector->dc_sink != NULL)
6726 if (old_state->content_protection == state->content_protection)
6729 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
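/*
 * Transition examples for the checks above (illustrative):
 *  - ENABLED -> DESIRED with the same content type: CP is merely being
 *    re-enabled, so it is treated as no change.
 *  - UNDESIRED -> ENABLED on S3 resume: rewritten to DESIRED so the HDCP
 *    state machine re-authenticates instead of assuming an active link.
 *  - A content type change while not UNDESIRED: forced back to DESIRED
 *    to trigger a full re-enable with the new type.
 */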
6736 static void remove_stream(struct amdgpu_device *adev,
6737 struct amdgpu_crtc *acrtc,
6738 struct dc_stream_state *stream)
6740 /* this is the update mode case */
6742 acrtc->otg_inst = -1;
6743 acrtc->enabled = false;
6746 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6747 struct dc_cursor_position *position)
6749 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6751 int xorigin = 0, yorigin = 0;
6753 position->enable = false;
6757 if (!crtc || !plane->state->fb)
6760 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6761 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6762 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6764 plane->state->crtc_w,
6765 plane->state->crtc_h);
6769 x = plane->state->crtc_x;
6770 y = plane->state->crtc_y;
6772 if (x <= -amdgpu_crtc->max_cursor_width ||
6773 y <= -amdgpu_crtc->max_cursor_height)
6777 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6781 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6784 position->enable = true;
6785 position->translate_by_source = true;
6788 position->x_hotspot = xorigin;
6789 position->y_hotspot = yorigin;
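/*
 * Worked example (illustrative): with max_cursor_width = 128 and the
 * cursor at crtc_x = -10, xorigin becomes min(10, 127) = 10, so the
 * hotspot moves 10 pixels into the cursor image while the on-screen x
 * position clamps to 0. This keeps a partially off-screen cursor
 * rendered correctly at the left and top edges.
 */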
6794 static void handle_cursor_update(struct drm_plane *plane,
6795 struct drm_plane_state *old_plane_state)
6797 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6798 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6799 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6800 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6801 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6802 uint64_t address = afb ? afb->address : 0;
6803 struct dc_cursor_position position;
6804 struct dc_cursor_attributes attributes;
6807 if (!plane->state->fb && !old_plane_state->fb)
6810 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6812 amdgpu_crtc->crtc_id,
6813 plane->state->crtc_w,
6814 plane->state->crtc_h);
6816 ret = get_cursor_position(plane, crtc, &position);
6820 if (!position.enable) {
6821 /* turn off cursor */
6822 if (crtc_state && crtc_state->stream) {
6823 mutex_lock(&adev->dm.dc_lock);
6824 dc_stream_set_cursor_position(crtc_state->stream,
6826 mutex_unlock(&adev->dm.dc_lock);
6831 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6832 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6834 memset(&attributes, 0, sizeof(attributes));
6835 attributes.address.high_part = upper_32_bits(address);
6836 attributes.address.low_part = lower_32_bits(address);
6837 attributes.width = plane->state->crtc_w;
6838 attributes.height = plane->state->crtc_h;
6839 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6840 attributes.rotation_angle = 0;
6841 attributes.attribute_flags.value = 0;
6843 attributes.pitch = attributes.width;
6845 if (crtc_state->stream) {
6846 mutex_lock(&adev->dm.dc_lock);
6847 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6849 DRM_ERROR("DC failed to set cursor attributes\n");
6851 if (!dc_stream_set_cursor_position(crtc_state->stream,
6853 DRM_ERROR("DC failed to set cursor position\n");
6854 mutex_unlock(&adev->dm.dc_lock);
6858 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6861 assert_spin_locked(&acrtc->base.dev->event_lock);
6862 WARN_ON(acrtc->event);
6864 acrtc->event = acrtc->base.state->event;
6866 /* Set the flip status */
6867 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6869 /* Mark this event as consumed */
6870 acrtc->base.state->event = NULL;
6872 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6876 static void update_freesync_state_on_stream(
6877 struct amdgpu_display_manager *dm,
6878 struct dm_crtc_state *new_crtc_state,
6879 struct dc_stream_state *new_stream,
6880 struct dc_plane_state *surface,
6881 u32 flip_timestamp_in_us)
6883 struct mod_vrr_params vrr_params;
6884 struct dc_info_packet vrr_infopacket = {0};
6885 struct amdgpu_device *adev = dm->adev;
6886 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6887 unsigned long flags;
6893 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6894 * For now it's sufficient to just guard against these conditions.
6897 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6900 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6901 vrr_params = acrtc->dm_irq_params.vrr_params;
6904 mod_freesync_handle_preflip(
6905 dm->freesync_module,
6908 flip_timestamp_in_us,
6911 if (adev->family < AMDGPU_FAMILY_AI &&
6912 amdgpu_dm_vrr_active(new_crtc_state)) {
6913 mod_freesync_handle_v_update(dm->freesync_module,
6914 new_stream, &vrr_params);
6916 /* Need to call this before the frame ends. */
6917 dc_stream_adjust_vmin_vmax(dm->dc,
6918 new_crtc_state->stream,
6919 &vrr_params.adjust);
6923 mod_freesync_build_vrr_infopacket(
6924 dm->freesync_module,
6928 TRANSFER_FUNC_UNKNOWN,
6931 new_crtc_state->freesync_timing_changed |=
6932 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
6934 sizeof(vrr_params.adjust)) != 0);
6936 new_crtc_state->freesync_vrr_info_changed |=
6937 (memcmp(&new_crtc_state->vrr_infopacket,
6939 sizeof(vrr_infopacket)) != 0);
6941 acrtc->dm_irq_params.vrr_params = vrr_params;
6942 new_crtc_state->vrr_infopacket = vrr_infopacket;
6944 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
6945 new_stream->vrr_infopacket = vrr_infopacket;
6947 if (new_crtc_state->freesync_vrr_info_changed)
6948 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6949 new_crtc_state->base.crtc->base.id,
6950 (int)new_crtc_state->base.vrr_enabled,
6951 (int)vrr_params.state);
6953 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6956 static void update_stream_irq_parameters(
6957 struct amdgpu_display_manager *dm,
6958 struct dm_crtc_state *new_crtc_state)
6960 struct dc_stream_state *new_stream = new_crtc_state->stream;
6961 struct mod_vrr_params vrr_params;
6962 struct mod_freesync_config config = new_crtc_state->freesync_config;
6963 struct amdgpu_device *adev = dm->adev;
6964 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6965 unsigned long flags;
6971 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6972 * For now it's sufficient to just guard against these conditions.
6974 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6977 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6978 vrr_params = acrtc->dm_irq_params.vrr_params;
6980 if (new_crtc_state->vrr_supported &&
6981 config.min_refresh_in_uhz &&
6982 config.max_refresh_in_uhz) {
6983 config.state = new_crtc_state->base.vrr_enabled ?
6984 VRR_STATE_ACTIVE_VARIABLE :
6987 config.state = VRR_STATE_UNSUPPORTED;
6990 mod_freesync_build_vrr_params(dm->freesync_module,
6992 &config, &vrr_params);
6994 new_crtc_state->freesync_timing_changed |=
6995 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
6996 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
6998 new_crtc_state->freesync_config = config;
6999 /* Copy state for access from DM IRQ handler */
7000 acrtc->dm_irq_params.freesync_config = config;
7001 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7002 acrtc->dm_irq_params.vrr_params = vrr_params;
7003 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
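/*
 * Summary of the mapping above (illustrative): with vrr_supported set and
 * a valid min/max refresh range, config.state follows the DRM property,
 * i.e. vrr_enabled selects VRR_STATE_ACTIVE_VARIABLE and otherwise an
 * inactive state is kept; without VRR support, VRR_STATE_UNSUPPORTED is
 * programmed.
 */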
7006 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7007 struct dm_crtc_state *new_state)
7009 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7010 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7012 if (!old_vrr_active && new_vrr_active) {
7013 /* Transition VRR inactive -> active:
7014 * While VRR is active, we must not disable vblank irq, as a
7015 * reenable after disable would compute bogus vblank/pflip
 * timestamps if the reenable happened inside the display front-porch.
 *
 * We also need the vupdate irq for the core vblank handling at the
 * end of vblank.
 */
7021 dm_set_vupdate_irq(new_state->base.crtc, true);
7022 drm_crtc_vblank_get(new_state->base.crtc);
7023 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7024 __func__, new_state->base.crtc->base.id);
7025 } else if (old_vrr_active && !new_vrr_active) {
7026 /* Transition VRR active -> inactive:
7027 * Allow vblank irq disable again for fixed refresh rate.
7029 dm_set_vupdate_irq(new_state->base.crtc, false);
7030 drm_crtc_vblank_put(new_state->base.crtc);
7031 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7032 __func__, new_state->base.crtc->base.id);
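/*
 * Example (illustrative): on a VRR-capable panel, entering a fullscreen
 * game with VRR enabled takes the off->on branch above (vupdate irq on,
 * vblank reference held so the irq cannot be disabled mid front-porch);
 * returning to the fixed-refresh desktop takes the on->off branch and
 * drops the reference again.
 */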
7036 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7038 struct drm_plane *plane;
7039 struct drm_plane_state *old_plane_state, *new_plane_state;
7043 * TODO: Make this per-stream so we don't issue redundant updates for
7044 * commits with multiple streams.
7046 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7048 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7049 handle_cursor_update(plane, old_plane_state);
7052 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7053 struct dc_state *dc_state,
7054 struct drm_device *dev,
7055 struct amdgpu_display_manager *dm,
7056 struct drm_crtc *pcrtc,
7057 bool wait_for_vblank)
7060 uint64_t timestamp_ns;
7061 struct drm_plane *plane;
7062 struct drm_plane_state *old_plane_state, *new_plane_state;
7063 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7064 struct drm_crtc_state *new_pcrtc_state =
7065 drm_atomic_get_new_crtc_state(state, pcrtc);
7066 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7067 struct dm_crtc_state *dm_old_crtc_state =
7068 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7069 int planes_count = 0, vpos, hpos;
7071 unsigned long flags;
7072 struct amdgpu_bo *abo;
7073 uint32_t target_vblank, last_flip_vblank;
7074 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7075 bool pflip_present = false;
7077 struct dc_surface_update surface_updates[MAX_SURFACES];
7078 struct dc_plane_info plane_infos[MAX_SURFACES];
7079 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7080 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7081 struct dc_stream_update stream_update;
7084 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7087 dm_error("Failed to allocate update bundle\n");
7092 * Disable the cursor first if we're disabling all the planes.
 * It'll remain on the screen after the planes are re-enabled
 * if we don't.
 */
7096 if (acrtc_state->active_planes == 0)
7097 amdgpu_dm_commit_cursors(state);
7099 /* update planes when needed */
7100 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7101 struct drm_crtc *crtc = new_plane_state->crtc;
7102 struct drm_crtc_state *new_crtc_state;
7103 struct drm_framebuffer *fb = new_plane_state->fb;
7104 bool plane_needs_flip;
7105 struct dc_plane_state *dc_plane;
7106 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7108 /* Cursor plane is handled after stream updates */
7109 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7112 if (!fb || !crtc || pcrtc != crtc)
7115 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7116 if (!new_crtc_state->active)
7119 dc_plane = dm_new_plane_state->dc_state;
7121 bundle->surface_updates[planes_count].surface = dc_plane;
7122 if (new_pcrtc_state->color_mgmt_changed) {
7123 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7124 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7125 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7128 fill_dc_scaling_info(new_plane_state,
7129 &bundle->scaling_infos[planes_count]);
7131 bundle->surface_updates[planes_count].scaling_info =
7132 &bundle->scaling_infos[planes_count];
7134 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7136 pflip_present = pflip_present || plane_needs_flip;
7138 if (!plane_needs_flip) {
7143 abo = gem_to_amdgpu_bo(fb->obj[0]);
/*
 * Wait for all fences on this FB. Do a limited wait to avoid
 * deadlock during GPU reset, when this fence will not signal
 * but we still hold the reservation lock for the BO.
 */
7150 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7152 msecs_to_jiffies(5000));
7153 if (unlikely(r <= 0))
7154 DRM_ERROR("Waiting for fences timed out!");
7156 fill_dc_plane_info_and_addr(
7157 dm->adev, new_plane_state,
7158 dm_new_plane_state->tiling_flags,
7159 &bundle->plane_infos[planes_count],
7160 &bundle->flip_addrs[planes_count].address,
7161 dm_new_plane_state->tmz_surface, false);
7163 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7164 new_plane_state->plane->index,
7165 bundle->plane_infos[planes_count].dcc.enable);
7167 bundle->surface_updates[planes_count].plane_info =
7168 &bundle->plane_infos[planes_count];
/*
 * Only allow immediate flips for fast updates that don't
 * change FB pitch, DCC state, rotation or mirroring.
 */
7174 bundle->flip_addrs[planes_count].flip_immediate =
7175 crtc->state->async_flip &&
7176 acrtc_state->update_type == UPDATE_TYPE_FAST;
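/*
 * Example (illustrative): a DRM_MODE_PAGE_FLIP_ASYNC request that only
 * swaps the FB address is classified as UPDATE_TYPE_FAST and may flip
 * immediately (potentially tearing); an update that touches pitch, DCC,
 * rotation or mirroring is not FAST, so flip_immediate stays false and
 * the flip is synchronized to vblank.
 */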
7178 timestamp_ns = ktime_get_ns();
7179 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7180 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7181 bundle->surface_updates[planes_count].surface = dc_plane;
7183 if (!bundle->surface_updates[planes_count].surface) {
7184 DRM_ERROR("No surface for CRTC: id=%d\n",
7185 acrtc_attach->crtc_id);
7189 if (plane == pcrtc->primary)
7190 update_freesync_state_on_stream(
7193 acrtc_state->stream,
7195 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7197 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7199 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7200 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7206 if (pflip_present) {
7208 /* Use old throttling in non-vrr fixed refresh rate mode
7209 * to keep flip scheduling based on target vblank counts
7210 * working in a backwards compatible way, e.g., for
7211 * clients using the GLX_OML_sync_control extension or
7212 * DRI3/Present extension with defined target_msc.
7214 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7217 /* For variable refresh rate mode only:
7218 * Get vblank of last completed flip to avoid > 1 vrr
7219 * flips per video frame by use of throttling, but allow
7220 * flip programming anywhere in the possibly large
7221 * variable vrr vblank interval for fine-grained flip
7222 * timing control and more opportunity to avoid stutter
7223 * on late submission of flips.
7225 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7226 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7227 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7230 target_vblank = last_flip_vblank + wait_for_vblank;
7233 * Wait until we're out of the vertical blank period before the one
7234 * targeted by the flip
7236 while ((acrtc_attach->enabled &&
7237 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7238 0, &vpos, &hpos, NULL,
7239 NULL, &pcrtc->hwmode)
7240 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7241 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7242 (int)(target_vblank -
7243 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7244 usleep_range(1000, 1100);
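/*
 * Worked example (illustrative): with wait_for_vblank == true the flip is
 * held back until target_vblank = last_flip_vblank + 1, i.e. at most one
 * flip completes per refresh cycle. In VRR mode last_flip_vblank comes
 * from the IRQ params, so the ~1 ms polling loop above releases the flip
 * as soon as scanout has left the previous frame's vblank region.
 */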
7248 * Prepare the flip event for the pageflip interrupt to handle.
7250 * This only works in the case where we've already turned on the
 * appropriate hardware blocks (e.g. HUBP), so in the transition case
 * from 0 -> n planes we have to skip a hardware-generated event and
 * rely on sending it from software.
 */
7255 if (acrtc_attach->base.state->event &&
7256 acrtc_state->active_planes > 0) {
7257 drm_crtc_vblank_get(pcrtc);
7259 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7261 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7262 prepare_flip_isr(acrtc_attach);
7264 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7267 if (acrtc_state->stream) {
7268 if (acrtc_state->freesync_vrr_info_changed)
7269 bundle->stream_update.vrr_infopacket =
7270 &acrtc_state->stream->vrr_infopacket;
7274 /* Update the planes if changed or disable if we don't have any. */
7275 if ((planes_count || acrtc_state->active_planes == 0) &&
7276 acrtc_state->stream) {
7277 bundle->stream_update.stream = acrtc_state->stream;
7278 if (new_pcrtc_state->mode_changed) {
7279 bundle->stream_update.src = acrtc_state->stream->src;
7280 bundle->stream_update.dst = acrtc_state->stream->dst;
7283 if (new_pcrtc_state->color_mgmt_changed) {
7285 * TODO: This isn't fully correct since we've actually
7286 * already modified the stream in place.
7288 bundle->stream_update.gamut_remap =
7289 &acrtc_state->stream->gamut_remap_matrix;
7290 bundle->stream_update.output_csc_transform =
7291 &acrtc_state->stream->csc_color_matrix;
7292 bundle->stream_update.out_transfer_func =
7293 acrtc_state->stream->out_transfer_func;
7296 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7297 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7298 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7301 * If FreeSync state on the stream has changed then we need to
7302 * re-adjust the min/max bounds now that DC doesn't handle this
7303 * as part of commit.
7305 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7306 amdgpu_dm_vrr_active(acrtc_state)) {
7307 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7308 dc_stream_adjust_vmin_vmax(
7309 dm->dc, acrtc_state->stream,
7310 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7311 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7313 mutex_lock(&dm->dc_lock);
7314 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7315 acrtc_state->stream->link->psr_settings.psr_allow_active)
7316 amdgpu_dm_psr_disable(acrtc_state->stream);
7318 dc_commit_updates_for_stream(dm->dc,
7319 bundle->surface_updates,
7321 acrtc_state->stream,
7322 &bundle->stream_update,
7326 * Enable or disable the interrupts on the backend.
7328 * Most pipes are put into power gating when unused.
7330 * When power gating is enabled on a pipe we lose the
7331 * interrupt enablement state when power gating is disabled.
7333 * So we need to update the IRQ control state in hardware
7334 * whenever the pipe turns on (since it could be previously
 * power gated) or off (since some pipes can't be power gated on
 * certain ASICs).
 */
7338 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7339 dm_update_pflip_irq_state(drm_to_adev(dev),
7342 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7343 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7344 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7345 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7346 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7347 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7348 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7349 amdgpu_dm_psr_enable(acrtc_state->stream);
7352 mutex_unlock(&dm->dc_lock);
/*
 * Update cursor state *after* programming all the planes.
 * This avoids redundant programming in the case where we're going
 * to be disabling a single plane, since those pipes are being
 * disabled anyway.
 */
7360 if (acrtc_state->active_planes)
7361 amdgpu_dm_commit_cursors(state);
7367 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7368 struct drm_atomic_state *state)
7370 struct amdgpu_device *adev = drm_to_adev(dev);
7371 struct amdgpu_dm_connector *aconnector;
7372 struct drm_connector *connector;
7373 struct drm_connector_state *old_con_state, *new_con_state;
7374 struct drm_crtc_state *new_crtc_state;
7375 struct dm_crtc_state *new_dm_crtc_state;
7376 const struct dc_stream_status *status;
7379 /* Notify device removals. */
7380 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7381 if (old_con_state->crtc != new_con_state->crtc) {
7382 /* CRTC changes require notification. */
7386 if (!new_con_state->crtc)
7389 new_crtc_state = drm_atomic_get_new_crtc_state(
7390 state, new_con_state->crtc);
7392 if (!new_crtc_state)
7395 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7399 aconnector = to_amdgpu_dm_connector(connector);
7401 mutex_lock(&adev->dm.audio_lock);
7402 inst = aconnector->audio_inst;
7403 aconnector->audio_inst = -1;
7404 mutex_unlock(&adev->dm.audio_lock);
7406 amdgpu_dm_audio_eld_notify(adev, inst);
7409 /* Notify audio device additions. */
7410 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7411 if (!new_con_state->crtc)
7414 new_crtc_state = drm_atomic_get_new_crtc_state(
7415 state, new_con_state->crtc);
7417 if (!new_crtc_state)
7420 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7423 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7424 if (!new_dm_crtc_state->stream)
7427 status = dc_stream_get_status(new_dm_crtc_state->stream);
7431 aconnector = to_amdgpu_dm_connector(connector);
7433 mutex_lock(&adev->dm.audio_lock);
7434 inst = status->audio_inst;
7435 aconnector->audio_inst = inst;
7436 mutex_unlock(&adev->dm.audio_lock);
7438 amdgpu_dm_audio_eld_notify(adev, inst);
7443 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7444 * @crtc_state: the DRM CRTC state
7445 * @stream_state: the DC stream state.
7447 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7448 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7450 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7451 struct dc_stream_state *stream_state)
7453 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7456 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7457 struct drm_atomic_state *state,
/*
 * Add a check here for SoCs that support a hardware cursor plane,
 * to unset legacy_cursor_update.
 */
7465 return drm_atomic_helper_commit(dev, state, nonblock);
/* TODO: Handle EINTR, re-enable IRQ */
7471 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7472 * @state: The atomic state to commit
7474 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered out anything non-kosher.
 */
7478 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7480 struct drm_device *dev = state->dev;
7481 struct amdgpu_device *adev = drm_to_adev(dev);
7482 struct amdgpu_display_manager *dm = &adev->dm;
7483 struct dm_atomic_state *dm_state;
7484 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7486 struct drm_crtc *crtc;
7487 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7488 unsigned long flags;
7489 bool wait_for_vblank = true;
7490 struct drm_connector *connector;
7491 struct drm_connector_state *old_con_state, *new_con_state;
7492 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7493 int crtc_disable_count = 0;
7494 bool mode_set_reset_required = false;
7496 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7497 drm_atomic_helper_calc_timestamping_constants(state);
7499 dm_state = dm_atomic_get_new_state(state);
7500 if (dm_state && dm_state->context) {
7501 dc_state = dm_state->context;
7503 /* No state changes, retain current state. */
7504 dc_state_temp = dc_create_state(dm->dc);
7505 ASSERT(dc_state_temp);
7506 dc_state = dc_state_temp;
7507 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7510 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7511 new_crtc_state, i) {
7512 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7514 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7516 if (old_crtc_state->active &&
7517 (!new_crtc_state->active ||
7518 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7519 manage_dm_interrupts(adev, acrtc, false);
7520 dc_stream_release(dm_old_crtc_state->stream);
7524 /* update changed items */
7525 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7526 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7528 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7529 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7532 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7533 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7534 "connectors_changed:%d\n",
7536 new_crtc_state->enable,
7537 new_crtc_state->active,
7538 new_crtc_state->planes_changed,
7539 new_crtc_state->mode_changed,
7540 new_crtc_state->active_changed,
7541 new_crtc_state->connectors_changed);
7543 /* Copy all transient state flags into dc state */
7544 if (dm_new_crtc_state->stream) {
7545 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7546 dm_new_crtc_state->stream);
/* Handle the headless hotplug case, updating new_state and
 * aconnector as needed.
 */
7553 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7555 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7557 if (!dm_new_crtc_state->stream) {
/*
 * This could happen because of issues with delivery
 * of userspace notifications: userspace tries to set
 * a mode on a display which is in fact disconnected,
 * so dc_sink is NULL on the aconnector and we expect
 * a mode reset to come soon.
 *
 * It can also happen when an unplug occurs while the
 * resume sequence is still completing.
 *
 * In either case, we want to pretend we still have a
 * sink to keep the pipe running, so that the hw state
 * stays consistent with the sw state.
 */
7573 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7574 __func__, acrtc->base.base.id);
7578 if (dm_old_crtc_state->stream)
7579 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7581 pm_runtime_get_noresume(dev->dev);
7583 acrtc->enabled = true;
7584 acrtc->hw_mode = new_crtc_state->mode;
7585 crtc->hwmode = new_crtc_state->mode;
7586 mode_set_reset_required = true;
7587 } else if (modereset_required(new_crtc_state)) {
7588 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7589 /* i.e. reset mode */
7590 if (dm_old_crtc_state->stream)
7591 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7592 mode_set_reset_required = true;
7594 } /* for_each_crtc_in_state() */
/* If there was a mode set or reset, disable eDP PSR */
7598 if (mode_set_reset_required)
7599 amdgpu_dm_psr_disable_all(dm);
7601 dm_enable_per_frame_crtc_master_sync(dc_state);
7602 mutex_lock(&dm->dc_lock);
7603 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7604 mutex_unlock(&dm->dc_lock);
7607 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7608 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7610 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7612 if (dm_new_crtc_state->stream != NULL) {
7613 const struct dc_stream_status *status =
7614 dc_stream_get_status(dm_new_crtc_state->stream);
7617 status = dc_stream_get_status_from_state(dc_state,
7618 dm_new_crtc_state->stream);
7620 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7622 acrtc->otg_inst = status->primary_otg_inst;
7625 #ifdef CONFIG_DRM_AMD_DC_HDCP
7626 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7627 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7628 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7629 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7631 new_crtc_state = NULL;
7634 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7636 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7638 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7639 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7640 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7641 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7645 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7646 hdcp_update_display(
7647 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7648 new_con_state->hdcp_content_type,
7649 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7654 /* Handle connector state changes */
7655 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7656 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7657 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7658 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7659 struct dc_surface_update dummy_updates[MAX_SURFACES];
7660 struct dc_stream_update stream_update;
7661 struct dc_info_packet hdr_packet;
7662 struct dc_stream_status *status = NULL;
7663 bool abm_changed, hdr_changed, scaling_changed;
7665 memset(&dummy_updates, 0, sizeof(dummy_updates));
7666 memset(&stream_update, 0, sizeof(stream_update));
7669 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7670 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7673 /* Skip any modesets/resets */
7674 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7677 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7678 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7680 scaling_changed = is_scaling_state_different(dm_new_con_state,
7683 abm_changed = dm_new_crtc_state->abm_level !=
7684 dm_old_crtc_state->abm_level;
7687 is_hdr_metadata_different(old_con_state, new_con_state);
7689 if (!scaling_changed && !abm_changed && !hdr_changed)
7692 stream_update.stream = dm_new_crtc_state->stream;
7693 if (scaling_changed) {
7694 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7695 dm_new_con_state, dm_new_crtc_state->stream);
7697 stream_update.src = dm_new_crtc_state->stream->src;
7698 stream_update.dst = dm_new_crtc_state->stream->dst;
7702 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7704 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7708 fill_hdr_info_packet(new_con_state, &hdr_packet);
7709 stream_update.hdr_static_metadata = &hdr_packet;
7712 status = dc_stream_get_status(dm_new_crtc_state->stream);
7714 WARN_ON(!status->plane_count);
7717 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7718 * Here we create an empty update on each plane.
7719 * To fix this, DC should permit updating only stream properties.
7721 for (j = 0; j < status->plane_count; j++)
7722 dummy_updates[j].surface = status->plane_states[0];
7725 mutex_lock(&dm->dc_lock);
7726 dc_commit_updates_for_stream(dm->dc,
7728 status->plane_count,
7729 dm_new_crtc_state->stream,
7732 mutex_unlock(&dm->dc_lock);
7735 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7736 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7737 new_crtc_state, i) {
7738 if (old_crtc_state->active && !new_crtc_state->active)
7739 crtc_disable_count++;
7741 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7742 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
/* Update the freesync config on the crtc state and the params used by the irq handler */
7745 update_stream_irq_parameters(dm, dm_new_crtc_state);
7747 /* Handle vrr on->off / off->on transitions */
7748 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
/*
 * Enable interrupts for CRTCs that are newly enabled or went through
 * a modeset. This was intentionally deferred until after the front-end
 * state was modified, so that the OTG is on by the time the IRQ
 * handlers run and they don't access stale or invalid state.
 */
7758 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7759 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7761 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7763 if (new_crtc_state->active &&
7764 (!old_crtc_state->active ||
7765 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7766 dc_stream_retain(dm_new_crtc_state->stream);
7767 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7768 manage_dm_interrupts(adev, acrtc, true);
7770 #ifdef CONFIG_DEBUG_FS
7772 * Frontend may have changed so reapply the CRC capture
7773 * settings for the stream.
7775 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7777 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7778 amdgpu_dm_crtc_configure_crc_source(
7779 crtc, dm_new_crtc_state,
7780 dm_new_crtc_state->crc_src);
7786 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7787 if (new_crtc_state->async_flip)
7788 wait_for_vblank = false;
/* update planes when needed, per CRTC */
7791 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7792 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7794 if (dm_new_crtc_state->stream)
7795 amdgpu_dm_commit_planes(state, dc_state, dev,
7796 dm, crtc, wait_for_vblank);
7799 /* Update audio instances for each connector. */
7800 amdgpu_dm_commit_audio(dev, state);
/*
 * Send a vblank event for all events not handled in a flip, and
 * mark each consumed event for drm_atomic_helper_commit_hw_done().
 */
7806 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7807 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7809 if (new_crtc_state->event)
7810 drm_send_event_locked(dev, &new_crtc_state->event->base);
7812 new_crtc_state->event = NULL;
7814 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7816 /* Signal HW programming completion */
7817 drm_atomic_helper_commit_hw_done(state);
7819 if (wait_for_vblank)
7820 drm_atomic_helper_wait_for_flip_done(dev, state);
7822 drm_atomic_helper_cleanup_planes(dev, state);
7825 * Finally, drop a runtime PM reference for each newly disabled CRTC,
 * so we can put the GPU into runtime suspend if we're not driving any
 * displays anymore.
 */
7829 for (i = 0; i < crtc_disable_count; i++)
7830 pm_runtime_put_autosuspend(dev->dev);
7831 pm_runtime_mark_last_busy(dev->dev);
7834 dc_release_state(dc_state_temp);
7838 static int dm_force_atomic_commit(struct drm_connector *connector)
7841 struct drm_device *ddev = connector->dev;
7842 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7843 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7844 struct drm_plane *plane = disconnected_acrtc->base.primary;
7845 struct drm_connector_state *conn_state;
7846 struct drm_crtc_state *crtc_state;
7847 struct drm_plane_state *plane_state;
7852 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7854 /* Construct an atomic state to restore previous display setting */
7857 * Attach connectors to drm_atomic_state
7859 conn_state = drm_atomic_get_connector_state(state, connector);
7861 ret = PTR_ERR_OR_ZERO(conn_state);
/* Attach crtc to drm_atomic_state */
7866 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7868 ret = PTR_ERR_OR_ZERO(crtc_state);
7872 /* force a restore */
7873 crtc_state->mode_changed = true;
7875 /* Attach plane to drm_atomic_state */
7876 plane_state = drm_atomic_get_plane_state(state, plane);
7878 ret = PTR_ERR_OR_ZERO(plane_state);
7883 /* Call commit internally with the state we just constructed */
7884 ret = drm_atomic_commit(state);
7889 DRM_ERROR("Restoring old state failed with %i\n", ret);
7890 drm_atomic_state_put(state);
/*
 * This function handles all cases where a set mode does not come upon
 * hotplug: when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
7900 void dm_restore_drm_connector_state(struct drm_device *dev,
7901 struct drm_connector *connector)
7903 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7904 struct amdgpu_crtc *disconnected_acrtc;
7905 struct dm_crtc_state *acrtc_state;
7907 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7910 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7911 if (!disconnected_acrtc)
7914 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7915 if (!acrtc_state->stream)
/*
 * If the previous sink is not released and differs from the current one,
 * we deduce that we are in a state where we cannot rely on a usermode
 * call to turn on the display, so we do it here.
 */
7923 if (acrtc_state->stream->sink != aconnector->dc_sink)
7924 dm_force_atomic_commit(&aconnector->base);
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
7931 static int do_aquire_global_lock(struct drm_device *dev,
7932 struct drm_atomic_state *state)
7934 struct drm_crtc *crtc;
7935 struct drm_crtc_commit *commit;
/*
 * Adding all modeset locks to the acquire_ctx ensures that when the
 * framework releases it, the extra locks we take here are released too.
 */
7943 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7947 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7948 spin_lock(&crtc->commit_lock);
7949 commit = list_first_entry_or_null(&crtc->commit_list,
7950 struct drm_crtc_commit, commit_entry);
7952 drm_crtc_commit_get(commit);
7953 spin_unlock(&crtc->commit_lock);
/*
 * Make sure all pending HW programming has completed and all
 * page flips are done before we continue.
 */
7962 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7965 ret = wait_for_completion_interruptible_timeout(
7966 &commit->flip_done, 10*HZ);
7969 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7970 "timed out\n", crtc->base.id, crtc->name);
7972 drm_crtc_commit_put(commit);
7975 return ret < 0 ? ret : 0;
7978 static void get_freesync_config_for_crtc(
7979 struct dm_crtc_state *new_crtc_state,
7980 struct dm_connector_state *new_con_state)
7982 struct mod_freesync_config config = {0};
7983 struct amdgpu_dm_connector *aconnector =
7984 to_amdgpu_dm_connector(new_con_state->base.connector);
7985 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7986 int vrefresh = drm_mode_vrefresh(mode);
7988 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7989 vrefresh >= aconnector->min_vfreq &&
7990 vrefresh <= aconnector->max_vfreq;
7992 if (new_crtc_state->vrr_supported) {
7993 new_crtc_state->stream->ignore_msa_timing_param = true;
7994 config.state = new_crtc_state->base.vrr_enabled ?
7995 VRR_STATE_ACTIVE_VARIABLE :
7997 config.min_refresh_in_uhz =
7998 aconnector->min_vfreq * 1000000;
7999 config.max_refresh_in_uhz =
8000 aconnector->max_vfreq * 1000000;
8001 config.vsif_supported = true;
8005 new_crtc_state->freesync_config = config;
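/*
 * Worked example (illustrative): a 48-144 Hz FreeSync panel driven with
 * a 120 Hz mode yields vrr_supported == true and a programmed range of
 * min_refresh_in_uhz = 48 * 1000000 and max_refresh_in_uhz = 144 * 1000000
 * (micro-Hz); a 160 Hz mode falls outside the panel's range, so VRR is
 * left disabled for it.
 */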
8008 static void reset_freesync_config_for_crtc(
8009 struct dm_crtc_state *new_crtc_state)
8011 new_crtc_state->vrr_supported = false;
8013 memset(&new_crtc_state->vrr_infopacket, 0,
8014 sizeof(new_crtc_state->vrr_infopacket));
8017 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8018 struct drm_atomic_state *state,
8019 struct drm_crtc *crtc,
8020 struct drm_crtc_state *old_crtc_state,
8021 struct drm_crtc_state *new_crtc_state,
8023 bool *lock_and_validation_needed)
8025 struct dm_atomic_state *dm_state = NULL;
8026 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8027 struct dc_stream_state *new_stream;
8031 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8032 * update changed items
8034 struct amdgpu_crtc *acrtc = NULL;
8035 struct amdgpu_dm_connector *aconnector = NULL;
8036 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8037 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8041 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8042 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8043 acrtc = to_amdgpu_crtc(crtc);
8044 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8046 /* TODO This hack should go away */
8047 if (aconnector && enable) {
8048 /* Make sure fake sink is created in plug-in scenario */
8049 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8051 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8054 if (IS_ERR(drm_new_conn_state)) {
8055 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8059 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8060 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8062 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8065 new_stream = create_validate_stream_for_sink(aconnector,
8066 &new_crtc_state->mode,
8068 dm_old_crtc_state->stream);
/*
 * We can have no stream on ACTION_SET if a display
 * was disconnected during S3; in this case it is not an
 * error: the OS will be updated after detection and
 * will do the right thing on the next atomic commit.
 */
8078 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8079 __func__, acrtc->base.base.id);
8085 * TODO: Check VSDB bits to decide whether this should
8086 * be enabled or not.
8088 new_stream->triggered_crtc_reset.enabled =
8089 dm->force_timing_sync;
8091 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8093 ret = fill_hdr_info_packet(drm_new_conn_state,
8094 &new_stream->hdr_static_metadata);
8099 * If we already removed the old stream from the context
8100 * (and set the new stream to NULL) then we can't reuse
8101 * the old stream even if the stream and scaling are unchanged.
 * We'll hit the BUG_ON below and get a black screen.
8104 * TODO: Refactor this function to allow this check to work
8105 * in all conditions.
8107 if (dm_new_crtc_state->stream &&
8108 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8109 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8110 new_crtc_state->mode_changed = false;
8111 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8112 new_crtc_state->mode_changed);
8116 /* mode_changed flag may get updated above, need to check again */
8117 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8121 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8122 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8123 "connectors_changed:%d\n",
8125 new_crtc_state->enable,
8126 new_crtc_state->active,
8127 new_crtc_state->planes_changed,
8128 new_crtc_state->mode_changed,
8129 new_crtc_state->active_changed,
8130 new_crtc_state->connectors_changed);
8132 /* Remove stream for any changed/disabled CRTC */
8135 if (!dm_old_crtc_state->stream)
8138 ret = dm_atomic_get_state(state, &dm_state);
8142 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8145 /* i.e. reset mode */
8146 if (dc_remove_stream_from_ctx(
8149 dm_old_crtc_state->stream) != DC_OK) {
8154 dc_stream_release(dm_old_crtc_state->stream);
8155 dm_new_crtc_state->stream = NULL;
8157 reset_freesync_config_for_crtc(dm_new_crtc_state);
8159 *lock_and_validation_needed = true;
} else { /* Add stream for any updated/enabled CRTC */
/*
 * Quick fix to prevent a NULL pointer dereference on new_stream when
 * newly added MST connectors are not found in the existing crtc_state
 * in chained (daisy-chain) mode.
 * TODO: dig out the root cause of this.
 */
8167 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8170 if (modereset_required(new_crtc_state))
8173 if (modeset_required(new_crtc_state, new_stream,
8174 dm_old_crtc_state->stream)) {
8176 WARN_ON(dm_new_crtc_state->stream);
8178 ret = dm_atomic_get_state(state, &dm_state);
8182 dm_new_crtc_state->stream = new_stream;
8184 dc_stream_retain(new_stream);
8186 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8189 if (dc_add_stream_to_ctx(
8192 dm_new_crtc_state->stream) != DC_OK) {
8197 *lock_and_validation_needed = true;
8202 /* Release extra reference */
8204 dc_stream_release(new_stream);
8207 * We want to do dc stream updates that do not require a
8208 * full modeset below.
8210 if (!(enable && aconnector && new_crtc_state->active))
8213 * Given above conditions, the dc state cannot be NULL because:
8214 * 1. We're in the process of enabling CRTCs (just been added
8215 * to the dc context, or already is on the context)
8216 * 2. Has a valid connector attached, and
8217 * 3. Is currently active and enabled.
8218 * => The dc stream state currently exists.
8220 BUG_ON(dm_new_crtc_state->stream == NULL);
8222 /* Scaling or underscan settings */
8223 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8224 update_stream_scaling_settings(
8225 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8228 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8231 * Color management settings. We also update color properties
 * when a modeset is needed, to ensure they get reprogrammed.
 */
8234 if (dm_new_crtc_state->base.color_mgmt_changed ||
8235 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8236 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8241 /* Update Freesync settings. */
8242 get_freesync_config_for_crtc(dm_new_crtc_state,
8249 dc_stream_release(new_stream);
8253 static bool should_reset_plane(struct drm_atomic_state *state,
8254 struct drm_plane *plane,
8255 struct drm_plane_state *old_plane_state,
8256 struct drm_plane_state *new_plane_state)
8258 struct drm_plane *other;
8259 struct drm_plane_state *old_other_state, *new_other_state;
8260 struct drm_crtc_state *new_crtc_state;
/*
 * TODO: Remove this hack once the checks below are sufficient
 * to determine when we need to reset all the planes on a stream.
 */
8268 if (state->allow_modeset)
8271 /* Exit early if we know that we're adding or removing the plane. */
8272 if (old_plane_state->crtc != new_plane_state->crtc)
8275 /* old crtc == new_crtc == NULL, plane not in context. */
8276 if (!new_plane_state->crtc)
8280 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8282 if (!new_crtc_state)
8285 /* CRTC Degamma changes currently require us to recreate planes. */
8286 if (new_crtc_state->color_mgmt_changed)
8289 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8293 * If there are any new primary or overlay planes being added or
8294 * removed then the z-order can potentially change. To ensure
8295 * correct z-order and pipe acquisition the current DC architecture
8296 * requires us to remove and recreate all existing planes.
8298 * TODO: Come up with a more elegant solution for this.
8300 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8301 struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8303 if (other->type == DRM_PLANE_TYPE_CURSOR)
8306 if (old_other_state->crtc != new_plane_state->crtc &&
8307 new_other_state->crtc != new_plane_state->crtc)
8310 if (old_other_state->crtc != new_other_state->crtc)
8313 /* Src/dst size and scaling updates. */
8314 if (old_other_state->src_w != new_other_state->src_w ||
8315 old_other_state->src_h != new_other_state->src_h ||
8316 old_other_state->crtc_w != new_other_state->crtc_w ||
8317 old_other_state->crtc_h != new_other_state->crtc_h)
8320 /* Rotation / mirroring updates. */
8321 if (old_other_state->rotation != new_other_state->rotation)
8324 /* Blending updates. */
8325 if (old_other_state->pixel_blend_mode !=
8326 new_other_state->pixel_blend_mode)
8329 /* Alpha updates. */
8330 if (old_other_state->alpha != new_other_state->alpha)
8333 /* Colorspace changes. */
8334 if (old_other_state->color_range != new_other_state->color_range ||
8335 old_other_state->color_encoding != new_other_state->color_encoding)
8338 /* Framebuffer checks fall at the end. */
8339 if (!old_other_state->fb || !new_other_state->fb)
8342 /* Pixel format changes can require bandwidth updates. */
8343 if (old_other_state->fb->format != new_other_state->fb->format)
8346 old_dm_plane_state = to_dm_plane_state(old_other_state);
8347 new_dm_plane_state = to_dm_plane_state(new_other_state);
8349 /* Tiling and DCC changes also require bandwidth updates. */
8350 if (old_dm_plane_state->tiling_flags !=
8351 new_dm_plane_state->tiling_flags)
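
/*
 * Called twice per plane from amdgpu_dm_atomic_check(): first with
 * enable == false to strip changed or removed planes from the DC
 * context, then with enable == true to add them back for the new
 * configuration. Planes flagged by should_reset_plane() are removed
 * and re-created even when they are only being updated.
 */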
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement a better atomic check for the cursor plane. */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
		    (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
					 new_plane_state->crtc_w, new_plane_state->crtc_h);
			return -EINVAL;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context))
			return -EINVAL;

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}
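
/*
 * When a modeset touches a CRTC driven over DisplayPort MST, the other
 * CRTCs sharing the same MST topology may need their bandwidth and DSC
 * configuration revalidated as well, so pull them into the atomic state
 * up front.
 */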
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For the full update case, which removes/adds/updates streams on
 * one CRTC while flipping on another CRTC, acquiring the global lock
 * guarantees that any such full update commit will wait for completion of any
 * outstanding flips using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
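 *
 * The check proceeds in ordered passes: affected connectors and planes are
 * added to the state, changed/removed planes and disabled streams are taken
 * out of the DC context, enabled streams and new planes are added back, and
 * DC global validation runs at the end, but only when some change affects
 * bandwidth (lock_and_validation_needed).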
 *
 * Return: -Error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;

	amdgpu_check_debugfs_connector_property_change(adev, state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}
#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}
	/* Prepass for updating tiling flags on new planes. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);

		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
				  &new_dm_plane_state->tmz_surface);
		if (ret)
			goto fail;
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;
	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context w/o causing a full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}
	/**
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
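				/*
				 * Example (illustrative): with
				 * num_private_objs == 3 and a match at
				 * i == 0, entry [0] is overwritten by
				 * entry [2] and the count drops to 2.
				 */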
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}
	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
							 UPDATE_TYPE_FULL :
							 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
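
/*
 * DP_MSA_TIMING_PAR_IGNORED is reported in the DP_DOWN_STREAM_PORT_COUNT
 * DPCD register; a sink that sets it can ignore the MSA timing
 * parameters, which is what allows the refresh rate to vary seamlessly
 * for FreeSync over DP.
 */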
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		return;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * With a non-NULL EDID, restrict FreeSync to DP and eDP sinks.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
	    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}

	if (edid_check_required == true && (edid->version > 1 ||
	   (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;
			range	= &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}
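
		/*
		 * Example (illustrative): a panel whose range-limits
		 * descriptor advertises 40-75 Hz continuous operation
		 * yields min_vfreq = 40 and max_vfreq = 75; the > 10 Hz
		 * window check below then marks it FreeSync capable.
		 */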
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize to a fail-safe of 2 static
	 * frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
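
	/*
	 * Worked example (illustrative): at 60 Hz the frame time is
	 * 1000000 / 60 = 16666 us, so num_frames_static becomes
	 * 30000 / 16666 + 1 = 2 frames, i.e. roughly 33 ms of static
	 * screen time before PSR entry.
	 */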
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{

	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");

	return dc_set_psr_allow_active(dm->dc, false);
}
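
/*
 * Re-evaluate CRTC timing synchronization for all active streams, e.g.
 * after the adev->dm.force_timing_sync setting has been toggled.
 */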
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}