/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"

#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "amdgpu_display.h"
#include "amdgpu_ucode.h"

#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
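
/*
 * Orientation note (not in the original source): the DMUB firmware image
 * carries this PSP header and footer around its inst_const section, which is
 * why both dm_dmub_hw_init() and dm_dmub_sw_init() below subtract
 * PSP_HEADER_BYTES and PSP_FOOTER_BYTES when computing region sizes.
 */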
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
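
/*
 * A rough sketch of the layering, for orientation only (this diagram is not
 * from the original source):
 *
 *   userspace (KMS / atomic ioctls)
 *          |
 *      DRM core
 *          |
 *      amdgpu_dm  (this file: translates DRM state into DC state)
 *          |
 *      DC (Display Core)
 *          |
 *      hardware (optionally via the DMUB firmware controller)
 */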
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);

/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1)
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping only gives valid results
		 * now that we are past the front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc_state->stream,
					&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc_state->stream,
					&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
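
/*
 * Rough call order, for orientation (derived from the code in this file, not
 * a comment from the original source): dm_sw_init() -> dm_dmub_sw_init()
 * allocates the DMUB service and its framebuffer regions; later,
 * amdgpu_dm_init() -> dm_dmub_hw_init() copies the firmware into place and
 * brings DMUB up before dc_hardware_init() runs.
 */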
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual number of CRTCs used */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;

error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret;

	if (!adev->dm.fw_dmcu)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
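
	/*
	 * Illustrative note (not in the original source): since
	 * 0xFFFF / 15 == 0x1111, this produces the evenly spaced ramp
	 * { 0x0000, 0x1111, 0x2222, ..., 0xEEEE, 0xFFFF }.
	 */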
	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	ret = dmcu_load_iram(dmcu, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);
		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (adev->in_gpu_reset) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (adev->in_gpu_reset) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
				dc_state->stream_status->plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);
	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);
	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * For calculating this expression we may need floating-point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From Euclid's division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll / 32;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);
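
	/*
	 * Worked example (illustrative, not from the original source):
	 * for max_cll = 65 we get q = 2 and r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
	 * which matches round(50 * 2**(65/32)) from the CTA-861-G formula.
	 */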
	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);
	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use dc_em_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
						       aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}
	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
			 aconnector->connector_id, aconnector->dc_sink, sink);
2154 mutex_lock(&dev->mode_config.mutex);
2157 * 1. Update status of the drm connector
2158 * 2. Send an event and let userspace tell us what to do
2162 * TODO: check if we still need the S3 mode update workaround.
2163 * If yes, put it here.
2165 if (aconnector->dc_sink)
2166 amdgpu_dm_update_freesync_caps(connector, NULL);
2168 aconnector->dc_sink = sink;
2169 dc_sink_retain(aconnector->dc_sink);
2170 if (sink->dc_edid.length == 0) {
2171 aconnector->edid = NULL;
2172 if (aconnector->dc_link->aux_mode) {
2173 drm_dp_cec_unset_edid(
2174 &aconnector->dm_dp_aux.aux);
2178 (struct edid *)sink->dc_edid.raw_edid;
2180 drm_connector_update_edid_property(connector,
2183 if (aconnector->dc_link->aux_mode)
2184 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2188 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2189 update_connector_ext_caps(aconnector);
2191 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2192 amdgpu_dm_update_freesync_caps(connector, NULL);
2193 drm_connector_update_edid_property(connector, NULL);
2194 aconnector->num_modes = 0;
2195 dc_sink_release(aconnector->dc_sink);
2196 aconnector->dc_sink = NULL;
2197 aconnector->edid = NULL;
2198 #ifdef CONFIG_DRM_AMD_DC_HDCP
2199 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2200 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2201 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2205 mutex_unlock(&dev->mode_config.mutex);
2208 dc_sink_release(sink);
2211 static void handle_hpd_irq(void *param)
2213 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2214 struct drm_connector *connector = &aconnector->base;
2215 struct drm_device *dev = connector->dev;
2216 enum dc_connection_type new_connection_type = dc_connection_none;
2217 #ifdef CONFIG_DRM_AMD_DC_HDCP
2218 struct amdgpu_device *adev = dev->dev_private;
* On failure, or for MST, there is no need to update the connector status
* or to notify the OS, since (in the MST case) MST does this in its own
* context.
2225 mutex_lock(&aconnector->hpd_lock);
2227 #ifdef CONFIG_DRM_AMD_DC_HDCP
2228 if (adev->dm.hdcp_workqueue)
2229 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2231 if (aconnector->fake_enable)
2232 aconnector->fake_enable = false;
2234 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2235 DRM_ERROR("KMS: Failed to detect connector\n");
2237 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2238 emulated_link_detect(aconnector->dc_link);
2241 drm_modeset_lock_all(dev);
2242 dm_restore_drm_connector_state(dev, connector);
2243 drm_modeset_unlock_all(dev);
2245 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2246 drm_kms_helper_hotplug_event(dev);
2248 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2249 amdgpu_dm_update_connector_after_detect(aconnector);
2252 drm_modeset_lock_all(dev);
2253 dm_restore_drm_connector_state(dev, connector);
2254 drm_modeset_unlock_all(dev);
2256 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2257 drm_kms_helper_hotplug_event(dev);
2259 mutex_unlock(&aconnector->hpd_lock);
2263 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2265 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2267 bool new_irq_handled = false;
2269 int dpcd_bytes_to_read;
2271 const int max_process_count = 30;
2272 int process_count = 0;
2274 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2276 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2277 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2278 /* DPCD 0x200 - 0x201 for downstream IRQ */
2279 dpcd_addr = DP_SINK_COUNT;
2281 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2282 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2283 dpcd_addr = DP_SINK_COUNT_ESI;
2286 dret = drm_dp_dpcd_read(
2287 &aconnector->dm_dp_aux.aux,
2290 dpcd_bytes_to_read);
2292 while (dret == dpcd_bytes_to_read &&
2293 process_count < max_process_count) {
2299 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2300 /* handle HPD short pulse irq */
2301 if (aconnector->mst_mgr.mst_state)
2303 &aconnector->mst_mgr,
2307 if (new_irq_handled) {
/* ACK at DPCD to notify the downstream device */
2309 const int ack_dpcd_bytes_to_write =
2310 dpcd_bytes_to_read - 1;
2312 for (retry = 0; retry < 3; retry++) {
2315 wret = drm_dp_dpcd_write(
2316 &aconnector->dm_dp_aux.aux,
2319 ack_dpcd_bytes_to_write);
2320 if (wret == ack_dpcd_bytes_to_write)
2324 /* check if there is new irq to be handled */
2325 dret = drm_dp_dpcd_read(
2326 &aconnector->dm_dp_aux.aux,
2329 dpcd_bytes_to_read);
2331 new_irq_handled = false;
2337 if (process_count == max_process_count)
2338 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2341 static void handle_hpd_rx_irq(void *param)
2343 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2344 struct drm_connector *connector = &aconnector->base;
2345 struct drm_device *dev = connector->dev;
2346 struct dc_link *dc_link = aconnector->dc_link;
2347 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2348 enum dc_connection_type new_connection_type = dc_connection_none;
2349 #ifdef CONFIG_DRM_AMD_DC_HDCP
2350 union hpd_irq_data hpd_irq_data;
2351 struct amdgpu_device *adev = dev->dev_private;
2353 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
* TODO: temporarily add a mutex to protect the hpd interrupt from a gpio
* conflict; once the i2c helper is implemented, this mutex should be
* removed.
2361 if (dc_link->type != dc_connection_mst_branch)
2362 mutex_lock(&aconnector->hpd_lock);
2365 #ifdef CONFIG_DRM_AMD_DC_HDCP
2366 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2368 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2370 !is_mst_root_connector) {
2371 /* Downstream Port status changed. */
2372 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2373 DRM_ERROR("KMS: Failed to detect connector\n");
2375 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2376 emulated_link_detect(dc_link);
2378 if (aconnector->fake_enable)
2379 aconnector->fake_enable = false;
2381 amdgpu_dm_update_connector_after_detect(aconnector);
2384 drm_modeset_lock_all(dev);
2385 dm_restore_drm_connector_state(dev, connector);
2386 drm_modeset_unlock_all(dev);
2388 drm_kms_helper_hotplug_event(dev);
2389 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2391 if (aconnector->fake_enable)
2392 aconnector->fake_enable = false;
2394 amdgpu_dm_update_connector_after_detect(aconnector);
2397 drm_modeset_lock_all(dev);
2398 dm_restore_drm_connector_state(dev, connector);
2399 drm_modeset_unlock_all(dev);
2401 drm_kms_helper_hotplug_event(dev);
2404 #ifdef CONFIG_DRM_AMD_DC_HDCP
2405 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2406 if (adev->dm.hdcp_workqueue)
2407 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2410 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2411 (dc_link->type == dc_connection_mst_branch))
2412 dm_handle_hpd_rx_irq(aconnector);
2414 if (dc_link->type != dc_connection_mst_branch) {
2415 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2416 mutex_unlock(&aconnector->hpd_lock);
2420 static void register_hpd_handlers(struct amdgpu_device *adev)
2422 struct drm_device *dev = adev->ddev;
2423 struct drm_connector *connector;
2424 struct amdgpu_dm_connector *aconnector;
2425 const struct dc_link *dc_link;
2426 struct dc_interrupt_params int_params = {0};
2428 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2429 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2431 list_for_each_entry(connector,
2432 &dev->mode_config.connector_list, head) {
2434 aconnector = to_amdgpu_dm_connector(connector);
2435 dc_link = aconnector->dc_link;
2437 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2438 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2439 int_params.irq_source = dc_link->irq_source_hpd;
2441 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2443 (void *) aconnector);
2446 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2448 /* Also register for DP short pulse (hpd_rx). */
2449 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2450 int_params.irq_source = dc_link->irq_source_hpd_rx;
2452 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2454 (void *) aconnector);
2459 /* Register IRQ sources and initialize IRQ callbacks */
2460 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2462 struct dc *dc = adev->dm.dc;
2463 struct common_irq_params *c_irq_params;
2464 struct dc_interrupt_params int_params = {0};
2467 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2469 if (adev->asic_type >= CHIP_VEGA10)
2470 client_id = SOC15_IH_CLIENTID_DCE;
2472 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2473 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2476 * Actions of amdgpu_irq_add_id():
2477 * 1. Register a set() function with base driver.
2478 * Base driver will call set() function to enable/disable an
2479 * interrupt in DC hardware.
2480 * 2. Register amdgpu_dm_irq_handler().
2481 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2482 * coming from DC hardware.
2483 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2484 * for acknowledging and handling. */
2486 /* Use VBLANK interrupt */
2487 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2488 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2490 DRM_ERROR("Failed to add crtc irq id!\n");
2494 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2495 int_params.irq_source =
2496 dc_interrupt_to_irq_source(dc, i, 0);
2498 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2500 c_irq_params->adev = adev;
2501 c_irq_params->irq_src = int_params.irq_source;
2503 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2504 dm_crtc_high_irq, c_irq_params);
2507 /* Use VUPDATE interrupt */
2508 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2509 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2511 DRM_ERROR("Failed to add vupdate irq id!\n");
2515 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2516 int_params.irq_source =
2517 dc_interrupt_to_irq_source(dc, i, 0);
2519 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2521 c_irq_params->adev = adev;
2522 c_irq_params->irq_src = int_params.irq_source;
2524 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2525 dm_vupdate_high_irq, c_irq_params);
2528 /* Use GRPH_PFLIP interrupt */
2529 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2530 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2531 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2533 DRM_ERROR("Failed to add page flip irq id!\n");
2537 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2538 int_params.irq_source =
2539 dc_interrupt_to_irq_source(dc, i, 0);
2541 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2543 c_irq_params->adev = adev;
2544 c_irq_params->irq_src = int_params.irq_source;
2546 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2547 dm_pflip_high_irq, c_irq_params);
2552 r = amdgpu_irq_add_id(adev, client_id,
2553 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2555 DRM_ERROR("Failed to add hpd irq id!\n");
2559 register_hpd_handlers(adev);
2564 #if defined(CONFIG_DRM_AMD_DC_DCN)
2565 /* Register IRQ sources and initialize IRQ callbacks */
2566 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2568 struct dc *dc = adev->dm.dc;
2569 struct common_irq_params *c_irq_params;
2570 struct dc_interrupt_params int_params = {0};
2574 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2575 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2578 * Actions of amdgpu_irq_add_id():
2579 * 1. Register a set() function with base driver.
2580 * Base driver will call set() function to enable/disable an
2581 * interrupt in DC hardware.
2582 * 2. Register amdgpu_dm_irq_handler().
2583 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2584 * coming from DC hardware.
2585 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2586 * for acknowledging and handling.
2589 /* Use VSTARTUP interrupt */
2590 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2591 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2593 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2596 DRM_ERROR("Failed to add crtc irq id!\n");
2600 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2601 int_params.irq_source =
2602 dc_interrupt_to_irq_source(dc, i, 0);
2604 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2606 c_irq_params->adev = adev;
2607 c_irq_params->irq_src = int_params.irq_source;
2609 amdgpu_dm_irq_register_interrupt(
2610 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2613 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2614 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2615 * to trigger at end of each vblank, regardless of state of the lock,
2616 * matching DCE behaviour.
2618 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2619 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2621 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2624 DRM_ERROR("Failed to add vupdate irq id!\n");
2628 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2629 int_params.irq_source =
2630 dc_interrupt_to_irq_source(dc, i, 0);
2632 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2634 c_irq_params->adev = adev;
2635 c_irq_params->irq_src = int_params.irq_source;
2637 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2638 dm_vupdate_high_irq, c_irq_params);
2641 /* Use GRPH_PFLIP interrupt */
2642 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2643 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2645 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2647 DRM_ERROR("Failed to add page flip irq id!\n");
2651 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2652 int_params.irq_source =
2653 dc_interrupt_to_irq_source(dc, i, 0);
2655 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2657 c_irq_params->adev = adev;
2658 c_irq_params->irq_src = int_params.irq_source;
2660 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2661 dm_pflip_high_irq, c_irq_params);
2666 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2669 DRM_ERROR("Failed to add hpd irq id!\n");
2673 register_hpd_handlers(adev);
2680 * Acquires the lock for the atomic state object and returns
2681 * the new atomic state.
2683 * This should only be called during atomic check.
2685 static int dm_atomic_get_state(struct drm_atomic_state *state,
2686 struct dm_atomic_state **dm_state)
2688 struct drm_device *dev = state->dev;
2689 struct amdgpu_device *adev = dev->dev_private;
2690 struct amdgpu_display_manager *dm = &adev->dm;
2691 struct drm_private_state *priv_state;
2696 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2697 if (IS_ERR(priv_state))
2698 return PTR_ERR(priv_state);
2700 *dm_state = to_dm_atomic_state(priv_state);
2705 static struct dm_atomic_state *
2706 dm_atomic_get_new_state(struct drm_atomic_state *state)
2708 struct drm_device *dev = state->dev;
2709 struct amdgpu_device *adev = dev->dev_private;
2710 struct amdgpu_display_manager *dm = &adev->dm;
2711 struct drm_private_obj *obj;
2712 struct drm_private_state *new_obj_state;
2715 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2716 if (obj->funcs == dm->atomic_obj.funcs)
2717 return to_dm_atomic_state(new_obj_state);
2723 static struct dm_atomic_state *
2724 dm_atomic_get_old_state(struct drm_atomic_state *state)
2726 struct drm_device *dev = state->dev;
2727 struct amdgpu_device *adev = dev->dev_private;
2728 struct amdgpu_display_manager *dm = &adev->dm;
2729 struct drm_private_obj *obj;
2730 struct drm_private_state *old_obj_state;
2733 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2734 if (obj->funcs == dm->atomic_obj.funcs)
2735 return to_dm_atomic_state(old_obj_state);
2741 static struct drm_private_state *
2742 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2744 struct dm_atomic_state *old_state, *new_state;
2746 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2750 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2752 old_state = to_dm_atomic_state(obj->state);
2754 if (old_state && old_state->context)
2755 new_state->context = dc_copy_state(old_state->context);
2757 if (!new_state->context) {
2762 return &new_state->base;
2765 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2766 struct drm_private_state *state)
2768 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2770 if (dm_state && dm_state->context)
2771 dc_release_state(dm_state->context);
2776 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2777 .atomic_duplicate_state = dm_atomic_duplicate_state,
2778 .atomic_destroy_state = dm_atomic_destroy_state,
2781 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2783 struct dm_atomic_state *state;
2786 adev->mode_info.mode_config_initialized = true;
2788 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2789 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2791 adev->ddev->mode_config.max_width = 16384;
2792 adev->ddev->mode_config.max_height = 16384;
2794 adev->ddev->mode_config.preferred_depth = 24;
2795 adev->ddev->mode_config.prefer_shadow = 1;
2796 /* indicates support for immediate flip */
2797 adev->ddev->mode_config.async_page_flip = true;
2799 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2801 state = kzalloc(sizeof(*state), GFP_KERNEL);
2805 state->context = dc_create_state(adev->dm.dc);
2806 if (!state->context) {
2811 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2813 drm_atomic_private_obj_init(adev->ddev,
2814 &adev->dm.atomic_obj,
2816 &dm_atomic_state_funcs);
2818 r = amdgpu_display_modeset_create_props(adev);
2822 r = amdgpu_dm_audio_init(adev);
2829 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2830 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2831 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2833 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2834 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2836 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2838 #if defined(CONFIG_ACPI)
2839 struct amdgpu_dm_backlight_caps caps;
2841 if (dm->backlight_caps.caps_valid)
2844 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2845 if (caps.caps_valid) {
2846 dm->backlight_caps.caps_valid = true;
2847 if (caps.aux_support)
2849 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2850 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2852 dm->backlight_caps.min_input_signal =
2853 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2854 dm->backlight_caps.max_input_signal =
2855 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2858 if (dm->backlight_caps.aux_support)
2861 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2862 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2866 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2873 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2874 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2879 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2880 const uint32_t user_brightness)
2882 u32 min, max, conversion_pace;
2883 u32 brightness = user_brightness;
2888 if (!caps->aux_support) {
2889 max = caps->max_input_signal;
2890 min = caps->min_input_signal;
* The brightness input is in the range 0-255. It needs to be rescaled
* to lie between the requested min and max input signal, and scaled up
* by 0x101 to match the DC interface, which has a range of 0 to 0xffff.
2899 conversion_pace = 0x101;
brightness = user_brightness * conversion_pace
	* (max - min) / AMDGPU_MAX_BL_LEVEL
	+ min * conversion_pace;
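/*
* Illustrative example, assuming the default caps (min = 12,
* max = 255): user_brightness = 128 maps to
* 128 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 34432,
* roughly half of the 0xffff DC range.
*/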
2908 * We are doing a linear interpolation here, which is OK but
2909 * does not provide the optimal result. We probably want
2910 * something close to the Perceptual Quantizer (PQ) curve.
2912 max = caps->aux_max_input_signal;
2913 min = caps->aux_min_input_signal;
2915 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2916 + user_brightness * max;
// Multiply the value by 1000 since we use millinits
2919 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
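/*
* Illustrative example with hypothetical panel caps of
* aux_min_input_signal = 50 and aux_max_input_signal = 440 nits:
* user_brightness = 128 gives (255 - 128) * 50 + 128 * 440 = 62670,
* which after the millinits scaling and the division above comes out
* to ~245765 millinits, i.e. roughly 246 nits.
*/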
2926 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2928 struct amdgpu_display_manager *dm = bl_get_data(bd);
2929 struct amdgpu_dm_backlight_caps caps;
2930 struct dc_link *link = NULL;
2934 amdgpu_dm_update_backlight_caps(dm);
2935 caps = dm->backlight_caps;
2937 link = (struct dc_link *)dm->backlight_link;
2939 brightness = convert_brightness(&caps, bd->props.brightness);
2940 // Change brightness based on AUX property
2941 if (caps.aux_support)
2942 return set_backlight_via_aux(link, brightness);
2944 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2949 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2951 struct amdgpu_display_manager *dm = bl_get_data(bd);
2952 int ret = dc_link_get_backlight_level(dm->backlight_link);
2954 if (ret == DC_ERROR_UNEXPECTED)
2955 return bd->props.brightness;
2959 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2960 .options = BL_CORE_SUSPENDRESUME,
2961 .get_brightness = amdgpu_dm_backlight_get_brightness,
2962 .update_status = amdgpu_dm_backlight_update_status,
2966 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2969 struct backlight_properties props = { 0 };
2971 amdgpu_dm_update_backlight_caps(dm);
2973 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2974 props.brightness = AMDGPU_MAX_BL_LEVEL;
2975 props.type = BACKLIGHT_RAW;
2977 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2978 dm->adev->ddev->primary->index);
2980 dm->backlight_dev = backlight_device_register(bl_name,
2981 dm->adev->ddev->dev,
2983 &amdgpu_dm_backlight_ops,
2986 if (IS_ERR(dm->backlight_dev))
2987 DRM_ERROR("DM: Backlight registration failed!\n");
2989 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2994 static int initialize_plane(struct amdgpu_display_manager *dm,
2995 struct amdgpu_mode_info *mode_info, int plane_id,
2996 enum drm_plane_type plane_type,
2997 const struct dc_plane_cap *plane_cap)
2999 struct drm_plane *plane;
3000 unsigned long possible_crtcs;
3003 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3005 DRM_ERROR("KMS: Failed to allocate plane\n");
3008 plane->type = plane_type;
* HACK: IGT tests expect that the primary plane for a CRTC
* can only have one possible CRTC. Only expose support for
* any CRTC on planes that will not be used as a primary
* plane for a CRTC - i.e. overlay or underlay planes.
3016 possible_crtcs = 1 << plane_id;
3017 if (plane_id >= dm->dc->caps.max_streams)
3018 possible_crtcs = 0xff;
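/* e.g. primary plane 2 gets possible_crtcs = 0b100 (CRTC 2 only),
* while overlay planes advertise every CRTC via 0xff. */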
3020 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3023 DRM_ERROR("KMS: Failed to initialize plane\n");
3029 mode_info->planes[plane_id] = plane;
3035 static void register_backlight_device(struct amdgpu_display_manager *dm,
3036 struct dc_link *link)
3038 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3039 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3041 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3042 link->type != dc_connection_none) {
* Even if registration fails, we should continue with
* DM initialization, because having no backlight control
* is better than a black screen.
3048 amdgpu_dm_register_backlight_device(dm);
3050 if (dm->backlight_dev)
3051 dm->backlight_link = link;
* In this architecture, the association
* connector -> encoder -> crtc
* is not really required. The crtc and connector will hold the
* display_index as an abstraction to use with the DAL component.
*
* Returns 0 on success.
3065 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3067 struct amdgpu_display_manager *dm = &adev->dm;
3069 struct amdgpu_dm_connector *aconnector = NULL;
3070 struct amdgpu_encoder *aencoder = NULL;
3071 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3073 int32_t primary_planes;
3074 enum dc_connection_type new_connection_type = dc_connection_none;
3075 const struct dc_plane_cap *plane;
3077 link_cnt = dm->dc->caps.max_links;
3078 if (amdgpu_dm_mode_config_init(dm->adev)) {
3079 DRM_ERROR("DM: Failed to initialize mode config\n");
3083 /* There is one primary plane per CRTC */
3084 primary_planes = dm->dc->caps.max_streams;
3085 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3088 * Initialize primary planes, implicit planes for legacy IOCTLS.
3089 * Order is reversed to match iteration order in atomic check.
3091 for (i = (primary_planes - 1); i >= 0; i--) {
3092 plane = &dm->dc->caps.planes[i];
3094 if (initialize_plane(dm, mode_info, i,
3095 DRM_PLANE_TYPE_PRIMARY, plane)) {
3096 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3102 * Initialize overlay planes, index starting after primary planes.
3103 * These planes have a higher DRM index than the primary planes since
3104 * they should be considered as having a higher z-order.
3105 * Order is reversed to match iteration order in atomic check.
3107 * Only support DCN for now, and only expose one so we don't encourage
3108 * userspace to use up all the pipes.
3110 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3111 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3113 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3116 if (!plane->blends_with_above || !plane->blends_with_below)
3119 if (!plane->pixel_format_support.argb8888)
3122 if (initialize_plane(dm, NULL, primary_planes + i,
3123 DRM_PLANE_TYPE_OVERLAY, plane)) {
3124 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3128 /* Only create one overlay plane. */
3132 for (i = 0; i < dm->dc->caps.max_streams; i++)
3133 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3134 DRM_ERROR("KMS: Failed to initialize crtc\n");
3138 dm->display_indexes_num = dm->dc->caps.max_streams;
3140 /* loops over all connectors on the board */
3141 for (i = 0; i < link_cnt; i++) {
3142 struct dc_link *link = NULL;
3144 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3146 "KMS: Cannot support more than %d display indexes\n",
3147 AMDGPU_DM_MAX_DISPLAY_INDEX);
3151 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3155 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3159 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3160 DRM_ERROR("KMS: Failed to initialize encoder\n");
3164 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3165 DRM_ERROR("KMS: Failed to initialize connector\n");
3169 link = dc_get_link_at_index(dm->dc, i);
3171 if (!dc_link_detect_sink(link, &new_connection_type))
3172 DRM_ERROR("KMS: Failed to detect connector\n");
3174 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3175 emulated_link_detect(link);
3176 amdgpu_dm_update_connector_after_detect(aconnector);
3178 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3179 amdgpu_dm_update_connector_after_detect(aconnector);
3180 register_backlight_device(dm, link);
3181 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3182 amdgpu_dm_set_psr_caps(link);
3188 /* Software is initialized. Now we can register interrupt handlers. */
3189 switch (adev->asic_type) {
3199 case CHIP_POLARIS11:
3200 case CHIP_POLARIS10:
3201 case CHIP_POLARIS12:
3206 if (dce110_register_irq_handlers(dm->adev)) {
3207 DRM_ERROR("DM: Failed to initialize IRQ\n");
3211 #if defined(CONFIG_DRM_AMD_DC_DCN)
3217 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3218 case CHIP_SIENNA_CICHLID:
3220 if (dcn10_register_irq_handlers(dm->adev)) {
3221 DRM_ERROR("DM: Failed to initialize IRQ\n");
3227 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3231 /* No userspace support. */
3232 dm->dc->debug.disable_tri_buf = true;
3242 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3244 drm_mode_config_cleanup(dm->ddev);
3245 drm_atomic_private_obj_fini(&dm->atomic_obj);
3249 /******************************************************************************
3250 * amdgpu_display_funcs functions
3251 *****************************************************************************/
3254 * dm_bandwidth_update - program display watermarks
3256 * @adev: amdgpu_device pointer
3258 * Calculate and program the display watermarks and line buffer allocation.
3260 static void dm_bandwidth_update(struct amdgpu_device *adev)
3262 /* TODO: implement later */
3265 static const struct amdgpu_display_funcs dm_display_funcs = {
3266 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3267 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3268 .backlight_set_level = NULL, /* never called for DC */
3269 .backlight_get_level = NULL, /* never called for DC */
3270 .hpd_sense = NULL,/* called unconditionally */
3271 .hpd_set_polarity = NULL, /* called unconditionally */
3272 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3273 .page_flip_get_scanoutpos =
3274 dm_crtc_get_scanoutpos,/* called unconditionally */
3275 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3276 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3279 #if defined(CONFIG_DEBUG_KERNEL_DC)
3281 static ssize_t s3_debug_store(struct device *device,
3282 struct device_attribute *attr,
3288 struct drm_device *drm_dev = dev_get_drvdata(device);
3289 struct amdgpu_device *adev = drm_dev->dev_private;
3291 ret = kstrtoint(buf, 0, &s3_state);
3296 drm_kms_helper_hotplug_event(adev->ddev);
3301 return ret == 0 ? count : 0;
3304 DEVICE_ATTR_WO(s3_debug);
3308 static int dm_early_init(void *handle)
3310 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3312 switch (adev->asic_type) {
3315 adev->mode_info.num_crtc = 6;
3316 adev->mode_info.num_hpd = 6;
3317 adev->mode_info.num_dig = 6;
3320 adev->mode_info.num_crtc = 4;
3321 adev->mode_info.num_hpd = 6;
3322 adev->mode_info.num_dig = 7;
3326 adev->mode_info.num_crtc = 2;
3327 adev->mode_info.num_hpd = 6;
3328 adev->mode_info.num_dig = 6;
3332 adev->mode_info.num_crtc = 6;
3333 adev->mode_info.num_hpd = 6;
3334 adev->mode_info.num_dig = 7;
3337 adev->mode_info.num_crtc = 3;
3338 adev->mode_info.num_hpd = 6;
3339 adev->mode_info.num_dig = 9;
3342 adev->mode_info.num_crtc = 2;
3343 adev->mode_info.num_hpd = 6;
3344 adev->mode_info.num_dig = 9;
3346 case CHIP_POLARIS11:
3347 case CHIP_POLARIS12:
3348 adev->mode_info.num_crtc = 5;
3349 adev->mode_info.num_hpd = 5;
3350 adev->mode_info.num_dig = 5;
3352 case CHIP_POLARIS10:
3354 adev->mode_info.num_crtc = 6;
3355 adev->mode_info.num_hpd = 6;
3356 adev->mode_info.num_dig = 6;
3361 adev->mode_info.num_crtc = 6;
3362 adev->mode_info.num_hpd = 6;
3363 adev->mode_info.num_dig = 6;
3365 #if defined(CONFIG_DRM_AMD_DC_DCN)
3367 adev->mode_info.num_crtc = 4;
3368 adev->mode_info.num_hpd = 4;
3369 adev->mode_info.num_dig = 4;
3374 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3375 case CHIP_SIENNA_CICHLID:
3377 adev->mode_info.num_crtc = 6;
3378 adev->mode_info.num_hpd = 6;
3379 adev->mode_info.num_dig = 6;
3382 adev->mode_info.num_crtc = 5;
3383 adev->mode_info.num_hpd = 5;
3384 adev->mode_info.num_dig = 5;
3387 adev->mode_info.num_crtc = 4;
3388 adev->mode_info.num_hpd = 4;
3389 adev->mode_info.num_dig = 4;
3392 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3396 amdgpu_dm_set_irq_funcs(adev);
3398 if (adev->mode_info.funcs == NULL)
3399 adev->mode_info.funcs = &dm_display_funcs;
3402 * Note: Do NOT change adev->audio_endpt_rreg and
3403 * adev->audio_endpt_wreg because they are initialised in
3404 * amdgpu_device_init()
3406 #if defined(CONFIG_DEBUG_KERNEL_DC)
3409 &dev_attr_s3_debug);
3415 static bool modeset_required(struct drm_crtc_state *crtc_state,
3416 struct dc_stream_state *new_stream,
3417 struct dc_stream_state *old_stream)
3419 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3422 if (!crtc_state->enable)
3425 return crtc_state->active;
3428 static bool modereset_required(struct drm_crtc_state *crtc_state)
3430 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3433 return !crtc_state->enable || !crtc_state->active;
3436 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3438 drm_encoder_cleanup(encoder);
3442 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3443 .destroy = amdgpu_dm_encoder_destroy,
3447 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3448 struct dc_scaling_info *scaling_info)
3450 int scale_w, scale_h;
3452 memset(scaling_info, 0, sizeof(*scaling_info));
/* Source coordinates are fixed-point 16.16; ignore the fractional part for now... */
3455 scaling_info->src_rect.x = state->src_x >> 16;
3456 scaling_info->src_rect.y = state->src_y >> 16;
3458 scaling_info->src_rect.width = state->src_w >> 16;
3459 if (scaling_info->src_rect.width == 0)
3462 scaling_info->src_rect.height = state->src_h >> 16;
3463 if (scaling_info->src_rect.height == 0)
3466 scaling_info->dst_rect.x = state->crtc_x;
3467 scaling_info->dst_rect.y = state->crtc_y;
3469 if (state->crtc_w == 0)
3472 scaling_info->dst_rect.width = state->crtc_w;
3474 if (state->crtc_h == 0)
3477 scaling_info->dst_rect.height = state->crtc_h;
3479 /* DRM doesn't specify clipping on destination output. */
3480 scaling_info->clip_rect = scaling_info->dst_rect;
3482 /* TODO: Validate scaling per-format with DC plane caps */
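/*
* The ratio checks below allow scaling between 0.25x (250/1000) and
* 16x (16000/1000) in each direction. Illustrative numbers: a
* 1920-wide source scanned out at 480 pixels gives
* scale_w = 480 * 1000 / 1920 = 250, which is just within range.
*/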
3483 scale_w = scaling_info->dst_rect.width * 1000 /
3484 scaling_info->src_rect.width;
3486 if (scale_w < 250 || scale_w > 16000)
3489 scale_h = scaling_info->dst_rect.height * 1000 /
3490 scaling_info->src_rect.height;
3492 if (scale_h < 250 || scale_h > 16000)
* The "scaling_quality" can be ignored for now; with quality = 0,
* DC assumes reasonable defaults based on the format.
3503 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3504 uint64_t *tiling_flags, bool *tmz_surface)
3506 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3507 int r = amdgpu_bo_reserve(rbo, false);
3510 /* Don't show error message when returning -ERESTARTSYS */
3511 if (r != -ERESTARTSYS)
3512 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3517 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3520 *tmz_surface = amdgpu_bo_encrypted(rbo);
3522 amdgpu_bo_unreserve(rbo);
3527 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3529 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3531 return offset ? (address + offset * 256) : 0;
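/* e.g. a DCC_OFFSET_256B value of 0x40 places the DCC metadata at
* address + 0x40 * 256 = address + 0x4000. */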
3535 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3536 const struct amdgpu_framebuffer *afb,
3537 const enum surface_pixel_format format,
3538 const enum dc_rotation_angle rotation,
3539 const struct plane_size *plane_size,
3540 const union dc_tiling_info *tiling_info,
3541 const uint64_t info,
3542 struct dc_plane_dcc_param *dcc,
3543 struct dc_plane_address *address,
3544 bool force_disable_dcc)
3546 struct dc *dc = adev->dm.dc;
3547 struct dc_dcc_surface_param input;
3548 struct dc_surface_dcc_cap output;
3549 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3550 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3551 uint64_t dcc_address;
3553 memset(&input, 0, sizeof(input));
3554 memset(&output, 0, sizeof(output));
3556 if (force_disable_dcc)
3562 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3565 if (!dc->cap_funcs.get_dcc_compression_cap)
3568 input.format = format;
3569 input.surface_size.width = plane_size->surface_size.width;
3570 input.surface_size.height = plane_size->surface_size.height;
3571 input.swizzle_mode = tiling_info->gfx9.swizzle;
3573 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3574 input.scan = SCAN_DIRECTION_HORIZONTAL;
3575 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3576 input.scan = SCAN_DIRECTION_VERTICAL;
3578 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3581 if (!output.capable)
3584 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3589 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3590 dcc->independent_64b_blks = i64b;
3592 dcc_address = get_dcc_address(afb->address, info);
3593 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3594 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3600 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3601 const struct amdgpu_framebuffer *afb,
3602 const enum surface_pixel_format format,
3603 const enum dc_rotation_angle rotation,
3604 const uint64_t tiling_flags,
3605 union dc_tiling_info *tiling_info,
3606 struct plane_size *plane_size,
3607 struct dc_plane_dcc_param *dcc,
3608 struct dc_plane_address *address,
3610 bool force_disable_dcc)
3612 const struct drm_framebuffer *fb = &afb->base;
3615 memset(tiling_info, 0, sizeof(*tiling_info));
3616 memset(plane_size, 0, sizeof(*plane_size));
3617 memset(dcc, 0, sizeof(*dcc));
3618 memset(address, 0, sizeof(*address));
3620 address->tmz_surface = tmz_surface;
3622 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3623 plane_size->surface_size.x = 0;
3624 plane_size->surface_size.y = 0;
3625 plane_size->surface_size.width = fb->width;
3626 plane_size->surface_size.height = fb->height;
3627 plane_size->surface_pitch =
3628 fb->pitches[0] / fb->format->cpp[0];
3630 address->type = PLN_ADDR_TYPE_GRAPHICS;
3631 address->grph.addr.low_part = lower_32_bits(afb->address);
3632 address->grph.addr.high_part = upper_32_bits(afb->address);
3633 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3634 uint64_t chroma_addr = afb->address + fb->offsets[1];
3636 plane_size->surface_size.x = 0;
3637 plane_size->surface_size.y = 0;
3638 plane_size->surface_size.width = fb->width;
3639 plane_size->surface_size.height = fb->height;
3640 plane_size->surface_pitch =
3641 fb->pitches[0] / fb->format->cpp[0];
3643 plane_size->chroma_size.x = 0;
3644 plane_size->chroma_size.y = 0;
3645 /* TODO: set these based on surface format */
3646 plane_size->chroma_size.width = fb->width / 2;
3647 plane_size->chroma_size.height = fb->height / 2;
3649 plane_size->chroma_pitch =
3650 fb->pitches[1] / fb->format->cpp[1];
3652 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3653 address->video_progressive.luma_addr.low_part =
3654 lower_32_bits(afb->address);
3655 address->video_progressive.luma_addr.high_part =
3656 upper_32_bits(afb->address);
3657 address->video_progressive.chroma_addr.low_part =
3658 lower_32_bits(chroma_addr);
3659 address->video_progressive.chroma_addr.high_part =
3660 upper_32_bits(chroma_addr);
3663 /* Fill GFX8 params */
3664 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3665 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3667 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3668 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3669 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3670 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3671 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3673 /* XXX fix me for VI */
3674 tiling_info->gfx8.num_banks = num_banks;
3675 tiling_info->gfx8.array_mode =
3676 DC_ARRAY_2D_TILED_THIN1;
3677 tiling_info->gfx8.tile_split = tile_split;
3678 tiling_info->gfx8.bank_width = bankw;
3679 tiling_info->gfx8.bank_height = bankh;
3680 tiling_info->gfx8.tile_aspect = mtaspect;
3681 tiling_info->gfx8.tile_mode =
3682 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3683 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3684 == DC_ARRAY_1D_TILED_THIN1) {
3685 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3688 tiling_info->gfx8.pipe_config =
3689 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3691 if (adev->asic_type == CHIP_VEGA10 ||
3692 adev->asic_type == CHIP_VEGA12 ||
3693 adev->asic_type == CHIP_VEGA20 ||
3694 adev->asic_type == CHIP_NAVI10 ||
3695 adev->asic_type == CHIP_NAVI14 ||
3696 adev->asic_type == CHIP_NAVI12 ||
3697 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3698 adev->asic_type == CHIP_SIENNA_CICHLID ||
3700 adev->asic_type == CHIP_RENOIR ||
3701 adev->asic_type == CHIP_RAVEN) {
3702 /* Fill GFX9 params */
3703 tiling_info->gfx9.num_pipes =
3704 adev->gfx.config.gb_addr_config_fields.num_pipes;
3705 tiling_info->gfx9.num_banks =
3706 adev->gfx.config.gb_addr_config_fields.num_banks;
3707 tiling_info->gfx9.pipe_interleave =
3708 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3709 tiling_info->gfx9.num_shader_engines =
3710 adev->gfx.config.gb_addr_config_fields.num_se;
3711 tiling_info->gfx9.max_compressed_frags =
3712 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3713 tiling_info->gfx9.num_rb_per_se =
3714 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3715 tiling_info->gfx9.swizzle =
3716 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3717 tiling_info->gfx9.shaderEnable = 1;
3719 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3720 if (adev->asic_type == CHIP_SIENNA_CICHLID)
3721 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3724 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3725 plane_size, tiling_info,
3726 tiling_flags, dcc, address,
3736 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3737 bool *per_pixel_alpha, bool *global_alpha,
3738 int *global_alpha_value)
3740 *per_pixel_alpha = false;
3741 *global_alpha = false;
3742 *global_alpha_value = 0xff;
3744 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3747 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3748 static const uint32_t alpha_formats[] = {
3749 DRM_FORMAT_ARGB8888,
3750 DRM_FORMAT_RGBA8888,
3751 DRM_FORMAT_ABGR8888,
3753 uint32_t format = plane_state->fb->format->format;
3756 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3757 if (format == alpha_formats[i]) {
3758 *per_pixel_alpha = true;
3764 if (plane_state->alpha < 0xffff) {
3765 *global_alpha = true;
3766 *global_alpha_value = plane_state->alpha >> 8;
3771 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3772 const enum surface_pixel_format format,
3773 enum dc_color_space *color_space)
3777 *color_space = COLOR_SPACE_SRGB;
3779 /* DRM color properties only affect non-RGB formats. */
3780 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3783 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3785 switch (plane_state->color_encoding) {
3786 case DRM_COLOR_YCBCR_BT601:
3788 *color_space = COLOR_SPACE_YCBCR601;
3790 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3793 case DRM_COLOR_YCBCR_BT709:
3795 *color_space = COLOR_SPACE_YCBCR709;
3797 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3800 case DRM_COLOR_YCBCR_BT2020:
3802 *color_space = COLOR_SPACE_2020_YCBCR;
3815 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3816 const struct drm_plane_state *plane_state,
3817 const uint64_t tiling_flags,
3818 struct dc_plane_info *plane_info,
3819 struct dc_plane_address *address,
3821 bool force_disable_dcc)
3823 const struct drm_framebuffer *fb = plane_state->fb;
3824 const struct amdgpu_framebuffer *afb =
3825 to_amdgpu_framebuffer(plane_state->fb);
3826 struct drm_format_name_buf format_name;
3829 memset(plane_info, 0, sizeof(*plane_info));
3831 switch (fb->format->format) {
3833 plane_info->format =
3834 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3836 case DRM_FORMAT_RGB565:
3837 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3839 case DRM_FORMAT_XRGB8888:
3840 case DRM_FORMAT_ARGB8888:
3841 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3843 case DRM_FORMAT_XRGB2101010:
3844 case DRM_FORMAT_ARGB2101010:
3845 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3847 case DRM_FORMAT_XBGR2101010:
3848 case DRM_FORMAT_ABGR2101010:
3849 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3851 case DRM_FORMAT_XBGR8888:
3852 case DRM_FORMAT_ABGR8888:
3853 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3855 case DRM_FORMAT_NV21:
3856 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3858 case DRM_FORMAT_NV12:
3859 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3861 case DRM_FORMAT_P010:
3862 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3864 case DRM_FORMAT_XRGB16161616F:
3865 case DRM_FORMAT_ARGB16161616F:
3866 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3868 case DRM_FORMAT_XBGR16161616F:
3869 case DRM_FORMAT_ABGR16161616F:
3870 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3874 "Unsupported screen format %s\n",
3875 drm_get_format_name(fb->format->format, &format_name));
3879 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3880 case DRM_MODE_ROTATE_0:
3881 plane_info->rotation = ROTATION_ANGLE_0;
3883 case DRM_MODE_ROTATE_90:
3884 plane_info->rotation = ROTATION_ANGLE_90;
3886 case DRM_MODE_ROTATE_180:
3887 plane_info->rotation = ROTATION_ANGLE_180;
3889 case DRM_MODE_ROTATE_270:
3890 plane_info->rotation = ROTATION_ANGLE_270;
3893 plane_info->rotation = ROTATION_ANGLE_0;
3897 plane_info->visible = true;
3898 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3900 plane_info->layer_index = 0;
3902 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3903 &plane_info->color_space);
3907 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3908 plane_info->rotation, tiling_flags,
3909 &plane_info->tiling_info,
3910 &plane_info->plane_size,
3911 &plane_info->dcc, address, tmz_surface,
3916 fill_blending_from_plane_state(
3917 plane_state, &plane_info->per_pixel_alpha,
3918 &plane_info->global_alpha, &plane_info->global_alpha_value);
3923 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3924 struct dc_plane_state *dc_plane_state,
3925 struct drm_plane_state *plane_state,
3926 struct drm_crtc_state *crtc_state)
3928 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3929 const struct amdgpu_framebuffer *amdgpu_fb =
3930 to_amdgpu_framebuffer(plane_state->fb);
3931 struct dc_scaling_info scaling_info;
3932 struct dc_plane_info plane_info;
3933 uint64_t tiling_flags;
3935 bool tmz_surface = false;
3936 bool force_disable_dcc = false;
3938 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3942 dc_plane_state->src_rect = scaling_info.src_rect;
3943 dc_plane_state->dst_rect = scaling_info.dst_rect;
3944 dc_plane_state->clip_rect = scaling_info.clip_rect;
3945 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3947 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3951 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3952 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3954 &dc_plane_state->address,
3960 dc_plane_state->format = plane_info.format;
3961 dc_plane_state->color_space = plane_info.color_space;
3963 dc_plane_state->plane_size = plane_info.plane_size;
3964 dc_plane_state->rotation = plane_info.rotation;
3965 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3966 dc_plane_state->stereo_format = plane_info.stereo_format;
3967 dc_plane_state->tiling_info = plane_info.tiling_info;
3968 dc_plane_state->visible = plane_info.visible;
3969 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3970 dc_plane_state->global_alpha = plane_info.global_alpha;
3971 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3972 dc_plane_state->dcc = plane_info.dcc;
3973 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3976 * Always set input transfer function, since plane state is refreshed
3979 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3986 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3987 const struct dm_connector_state *dm_state,
3988 struct dc_stream_state *stream)
3990 enum amdgpu_rmx_type rmx_type;
3992 struct rect src = { 0 }; /* viewport in composition space*/
3993 struct rect dst = { 0 }; /* stream addressable area */
3995 /* no mode. nothing to be done */
3999 /* Full screen scaling by default */
4000 src.width = mode->hdisplay;
4001 src.height = mode->vdisplay;
4002 dst.width = stream->timing.h_addressable;
4003 dst.height = stream->timing.v_addressable;
4006 rmx_type = dm_state->scaling;
4007 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4008 if (src.width * dst.height <
4009 src.height * dst.width) {
4010 /* height needs less upscaling/more downscaling */
4011 dst.width = src.width *
4012 dst.height / src.height;
4014 /* width needs less upscaling/more downscaling */
4015 dst.height = src.height *
4016 dst.width / src.width;
4018 } else if (rmx_type == RMX_CENTER) {
4022 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4023 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4025 if (dm_state->underscan_enable) {
4026 dst.x += dm_state->underscan_hborder / 2;
4027 dst.y += dm_state->underscan_vborder / 2;
4028 dst.width -= dm_state->underscan_hborder;
4029 dst.height -= dm_state->underscan_vborder;
4036 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4037 dst.x, dst.y, dst.width, dst.height);
4041 static enum dc_color_depth
4042 convert_color_depth_from_display_info(const struct drm_connector *connector,
4043 bool is_y420, int requested_bpc)
4050 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4051 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4053 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4055 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4058 bpc = (uint8_t)connector->display_info.bpc;
4059 /* Assume 8 bpc by default if no bpc is specified. */
4060 bpc = bpc ? bpc : 8;
4063 if (requested_bpc > 0) {
4065 * Cap display bpc based on the user requested value.
* The value of state->max_bpc may not be correctly updated,
* depending on when the connector gets added to the state,
* or if this was called outside of atomic check, so it
* can't be used directly.
4072 bpc = min_t(u8, bpc, requested_bpc);
4074 /* Round down to the nearest even number. */
4075 bpc = bpc - (bpc & 1);
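/* e.g. a sink reporting 12 bpc with requested_bpc = 10 yields
* min(12, 10) = 10; an odd value such as 7 rounds down to 6. */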
* Temporary workaround: DRM doesn't parse color depth for
* EDID revisions before 1.4.
* TODO: Fix EDID parsing.
4085 return COLOR_DEPTH_888;
4087 return COLOR_DEPTH_666;
4089 return COLOR_DEPTH_888;
4091 return COLOR_DEPTH_101010;
4093 return COLOR_DEPTH_121212;
4095 return COLOR_DEPTH_141414;
4097 return COLOR_DEPTH_161616;
4099 return COLOR_DEPTH_UNDEFINED;
4103 static enum dc_aspect_ratio
4104 get_aspect_ratio(const struct drm_display_mode *mode_in)
4106 /* 1-1 mapping, since both enums follow the HDMI spec. */
4107 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4110 static enum dc_color_space
4111 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4113 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4115 switch (dc_crtc_timing->pixel_encoding) {
4116 case PIXEL_ENCODING_YCBCR422:
4117 case PIXEL_ENCODING_YCBCR444:
4118 case PIXEL_ENCODING_YCBCR420:
* 27030 kHz is the separation point between HDTV and SDTV;
* per the HDMI spec, we use YCbCr709 above it and YCbCr601
* below it.
4125 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4126 if (dc_crtc_timing->flags.Y_ONLY)
4128 COLOR_SPACE_YCBCR709_LIMITED;
4130 color_space = COLOR_SPACE_YCBCR709;
4132 if (dc_crtc_timing->flags.Y_ONLY)
4134 COLOR_SPACE_YCBCR601_LIMITED;
4136 color_space = COLOR_SPACE_YCBCR601;
4141 case PIXEL_ENCODING_RGB:
4142 color_space = COLOR_SPACE_SRGB;
4153 static bool adjust_colour_depth_from_display_info(
4154 struct dc_crtc_timing *timing_out,
4155 const struct drm_display_info *info)
4157 enum dc_color_depth depth = timing_out->display_color_depth;
4160 normalized_clk = timing_out->pix_clk_100hz / 10;
4161 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4162 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4163 normalized_clk /= 2;
/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
4166 case COLOR_DEPTH_888:
4168 case COLOR_DEPTH_101010:
4169 normalized_clk = (normalized_clk * 30) / 24;
4171 case COLOR_DEPTH_121212:
4172 normalized_clk = (normalized_clk * 36) / 24;
4174 case COLOR_DEPTH_161616:
4175 normalized_clk = (normalized_clk * 48) / 24;
4178 /* The above depths are the only ones valid for HDMI. */
4181 if (normalized_clk <= info->max_tmds_clock) {
4182 timing_out->display_color_depth = depth;
4185 } while (--depth > COLOR_DEPTH_666);
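/*
* Illustrative example: a 594000 kHz 4k60 mode at COLOR_DEPTH_101010
* needs 594000 * 30 / 24 = 742500 kHz, above a typical HDMI 2.0
* max_tmds_clock of 600000 kHz, so the loop retries and settles on
* COLOR_DEPTH_888 (594000 kHz).
*/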
4189 static void fill_stream_properties_from_drm_display_mode(
4190 struct dc_stream_state *stream,
4191 const struct drm_display_mode *mode_in,
4192 const struct drm_connector *connector,
4193 const struct drm_connector_state *connector_state,
4194 const struct dc_stream_state *old_stream,
4197 struct dc_crtc_timing *timing_out = &stream->timing;
4198 const struct drm_display_info *info = &connector->display_info;
4199 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4200 struct hdmi_vendor_infoframe hv_frame;
4201 struct hdmi_avi_infoframe avi_frame;
4203 memset(&hv_frame, 0, sizeof(hv_frame));
4204 memset(&avi_frame, 0, sizeof(avi_frame));
4206 timing_out->h_border_left = 0;
4207 timing_out->h_border_right = 0;
4208 timing_out->v_border_top = 0;
4209 timing_out->v_border_bottom = 0;
4210 /* TODO: un-hardcode */
4211 if (drm_mode_is_420_only(info, mode_in)
4212 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4213 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4214 else if (drm_mode_is_420_also(info, mode_in)
4215 && aconnector->force_yuv420_output)
4216 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4217 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4218 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4219 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4221 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4223 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4224 timing_out->display_color_depth = convert_color_depth_from_display_info(
4226 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4228 timing_out->scan_type = SCANNING_TYPE_NODATA;
4229 timing_out->hdmi_vic = 0;
4232 timing_out->vic = old_stream->timing.vic;
4233 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4234 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4236 timing_out->vic = drm_match_cea_mode(mode_in);
4237 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4238 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4239 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4240 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4243 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4244 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4245 timing_out->vic = avi_frame.video_code;
4246 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4247 timing_out->hdmi_vic = hv_frame.vic;
4250 timing_out->h_addressable = mode_in->crtc_hdisplay;
4251 timing_out->h_total = mode_in->crtc_htotal;
4252 timing_out->h_sync_width =
4253 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4254 timing_out->h_front_porch =
4255 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4256 timing_out->v_total = mode_in->crtc_vtotal;
4257 timing_out->v_addressable = mode_in->crtc_vdisplay;
4258 timing_out->v_front_porch =
4259 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4260 timing_out->v_sync_width =
4261 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4262 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
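/* crtc_clock is in kHz while pix_clk_100hz is in 100 Hz units;
* e.g. a 148500 kHz 1080p60 mode becomes 1485000. */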
4263 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4265 stream->output_color_space = get_output_color_space(timing_out);
4267 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4268 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4269 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4270 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4271 drm_mode_is_420_also(info, mode_in) &&
4272 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4273 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4274 adjust_colour_depth_from_display_info(timing_out, info);
4279 static void fill_audio_info(struct audio_info *audio_info,
4280 const struct drm_connector *drm_connector,
4281 const struct dc_sink *dc_sink)
4284 int cea_revision = 0;
4285 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4287 audio_info->manufacture_id = edid_caps->manufacturer_id;
4288 audio_info->product_id = edid_caps->product_id;
4290 cea_revision = drm_connector->display_info.cea_rev;
4292 strscpy(audio_info->display_name,
4293 edid_caps->display_name,
4294 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4296 if (cea_revision >= 3) {
4297 audio_info->mode_count = edid_caps->audio_mode_count;
4299 for (i = 0; i < audio_info->mode_count; ++i) {
4300 audio_info->modes[i].format_code =
4301 (enum audio_format_code)
4302 (edid_caps->audio_modes[i].format_code);
4303 audio_info->modes[i].channel_count =
4304 edid_caps->audio_modes[i].channel_count;
4305 audio_info->modes[i].sample_rates.all =
4306 edid_caps->audio_modes[i].sample_rate;
4307 audio_info->modes[i].sample_size =
4308 edid_caps->audio_modes[i].sample_size;
4312 audio_info->flags.all = edid_caps->speaker_flags;
	/* TODO: We only check the progressive mode; check the interlaced mode too */
4315 if (drm_connector->latency_present[0]) {
4316 audio_info->video_latency = drm_connector->video_latency[0];
4317 audio_info->audio_latency = drm_connector->audio_latency[0];
4320 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4325 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4326 struct drm_display_mode *dst_mode)
4328 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4329 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4330 dst_mode->crtc_clock = src_mode->crtc_clock;
4331 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4332 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4333 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4334 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4335 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4336 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4337 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4338 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4339 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4340 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4341 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
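/*
 * Adopt the native mode's CRTC timing when scaling is enabled, or when the
 * requested mode already matches the native clock and h/v totals (i.e. an
 * amdgpu-inserted common mode); otherwise leave the timing untouched.
 */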
4345 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4346 const struct drm_display_mode *native_mode,
4349 if (scale_enabled) {
4350 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4351 } else if (native_mode->clock == drm_mode->clock &&
4352 native_mode->htotal == drm_mode->htotal &&
4353 native_mode->vtotal == drm_mode->vtotal) {
4354 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
		/* no scaling and no amdgpu-inserted mode: nothing to patch */
4360 static struct dc_sink *
4361 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4363 struct dc_sink_init_data sink_init_data = { 0 };
4364 struct dc_sink *sink = NULL;
4365 sink_init_data.link = aconnector->dc_link;
4366 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4368 sink = dc_sink_create(&sink_init_data);
4370 DRM_ERROR("Failed to create sink!\n");
4373 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4378 static void set_multisync_trigger_params(
4379 struct dc_stream_state *stream)
4381 if (stream->triggered_crtc_reset.enabled) {
4382 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4383 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
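/*
 * Pick the stream with the highest refresh rate as the multisync master;
 * every other synchronized stream uses it as its CRTC reset event source.
 */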
4387 static void set_master_stream(struct dc_stream_state *stream_set[],
4390 int j, highest_rfr = 0, master_stream = 0;
4392 for (j = 0; j < stream_count; j++) {
4393 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4394 int refresh_rate = 0;
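			/*
			 * Integer refresh rate in Hz: pix_clk_100hz is in
			 * 100 Hz units, so e.g. 1080p60 gives
			 * (1485000 * 100) / (2200 * 1125) = 60.
			 */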
4396 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4397 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4398 if (refresh_rate > highest_rfr) {
4399 highest_rfr = refresh_rate;
4404 for (j = 0; j < stream_count; j++) {
4406 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4410 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4414 if (context->stream_count < 2)
4416 for (i = 0; i < context->stream_count ; i++) {
4417 if (!context->streams[i])
4420 * TODO: add a function to read AMD VSDB bits and set
4421 * crtc_sync_master.multi_sync_enabled flag
4422 * For now it's set to false
4424 set_multisync_trigger_params(context->streams[i]);
4426 set_master_stream(context->streams, context->stream_count);
4429 static struct dc_stream_state *
4430 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4431 const struct drm_display_mode *drm_mode,
4432 const struct dm_connector_state *dm_state,
4433 const struct dc_stream_state *old_stream,
4436 struct drm_display_mode *preferred_mode = NULL;
4437 struct drm_connector *drm_connector;
4438 const struct drm_connector_state *con_state =
4439 dm_state ? &dm_state->base : NULL;
4440 struct dc_stream_state *stream = NULL;
4441 struct drm_display_mode mode = *drm_mode;
4442 bool native_mode_found = false;
4443 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4445 int preferred_refresh = 0;
4446 #if defined(CONFIG_DRM_AMD_DC_DCN)
4447 struct dsc_dec_dpcd_caps dsc_caps;
4449 uint32_t link_bandwidth_kbps;
4451 struct dc_sink *sink = NULL;
4452 if (aconnector == NULL) {
4453 DRM_ERROR("aconnector is NULL!\n");
4457 drm_connector = &aconnector->base;
4459 if (!aconnector->dc_sink) {
4460 sink = create_fake_sink(aconnector);
4464 sink = aconnector->dc_sink;
4465 dc_sink_retain(sink);
4468 stream = dc_create_stream_for_sink(sink);
4470 if (stream == NULL) {
4471 DRM_ERROR("Failed to create stream for sink!\n");
4475 stream->dm_stream_context = aconnector;
4477 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4478 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4480 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4481 /* Search for preferred mode */
4482 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4483 native_mode_found = true;
4487 if (!native_mode_found)
4488 preferred_mode = list_first_entry_or_null(
4489 &aconnector->base.modes,
4490 struct drm_display_mode,
4493 mode_refresh = drm_mode_vrefresh(&mode);
4495 if (preferred_mode == NULL) {
4497 * This may not be an error, the use case is when we have no
4498 * usermode calls to reset and set mode upon hotplug. In this
4499 * case, we call set mode ourselves to restore the previous mode
4500 * and the modelist may not be filled in in time.
4502 DRM_DEBUG_DRIVER("No preferred mode found\n");
4504 decide_crtc_timing_for_drm_display_mode(
4505 &mode, preferred_mode,
4506 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4507 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4511 drm_mode_set_crtcinfo(&mode, 0);
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the VIC and polarities from the old timing
4517 if (!scale || mode_refresh != preferred_refresh)
4518 fill_stream_properties_from_drm_display_mode(stream,
4519 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4521 fill_stream_properties_from_drm_display_mode(stream,
4522 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4524 stream->timing.flags.DSC = 0;
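	/*
	 * DSC stays off unless this is a DP sink whose DPCD DSC caps parse
	 * successfully and dc_dsc_compute_config() finds a configuration that
	 * fits within the verified link bandwidth.
	 */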
4526 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4527 #if defined(CONFIG_DRM_AMD_DC_DCN)
4528 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4529 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4530 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4533 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4534 dc_link_get_link_cap(aconnector->dc_link));
4536 #if defined(CONFIG_DRM_AMD_DC_DCN)
4537 if (dsc_caps.is_dsc_supported)
4538 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4540 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4541 link_bandwidth_kbps,
4543 &stream->timing.dsc_cfg))
4544 stream->timing.flags.DSC = 1;
4548 update_stream_scaling_settings(&mode, dm_state, stream);
4551 &stream->audio_info,
4555 update_stream_signal(stream, sink);
4557 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4558 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4559 if (stream->link->psr_settings.psr_feature_enabled) {
4560 struct dc *core_dc = stream->link->ctx->dc;
4562 if (dc_is_dmcu_initialized(core_dc)) {
			// Decide whether the stream supports VSC SDP colorimetry
			// before building the VSC info packet
4567 stream->use_vsc_sdp_for_colorimetry = false;
4568 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4569 stream->use_vsc_sdp_for_colorimetry =
4570 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4572 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4573 stream->use_vsc_sdp_for_colorimetry = true;
4575 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4579 dc_sink_release(sink);
4584 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4586 drm_crtc_cleanup(crtc);
4590 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4591 struct drm_crtc_state *state)
4593 struct dm_crtc_state *cur = to_dm_crtc_state(state);
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4597 dc_stream_release(cur->stream);
4600 __drm_atomic_helper_crtc_destroy_state(state);
4606 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4608 struct dm_crtc_state *state;
4611 dm_crtc_destroy_state(crtc, crtc->state);
4613 state = kzalloc(sizeof(*state), GFP_KERNEL);
4614 if (WARN_ON(!state))
4617 __drm_atomic_helper_crtc_reset(crtc, &state->base);
4620 static struct drm_crtc_state *
4621 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4623 struct dm_crtc_state *state, *cur;
4625 cur = to_dm_crtc_state(crtc->state);
4627 if (WARN_ON(!crtc->state))
4630 state = kzalloc(sizeof(*state), GFP_KERNEL);
4634 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4637 state->stream = cur->stream;
4638 dc_stream_retain(state->stream);
4641 state->active_planes = cur->active_planes;
4642 state->interrupts_enabled = cur->interrupts_enabled;
4643 state->vrr_params = cur->vrr_params;
4644 state->vrr_infopacket = cur->vrr_infopacket;
4645 state->abm_level = cur->abm_level;
4646 state->vrr_supported = cur->vrr_supported;
4647 state->freesync_config = cur->freesync_config;
4648 state->crc_src = cur->crc_src;
4649 state->cm_has_degamma = cur->cm_has_degamma;
4650 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
4654 return &state->base;
4657 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4659 enum dc_irq_source irq_source;
4660 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4661 struct amdgpu_device *adev = crtc->dev->dev_private;
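	/* VUPDATE interrupt sources are laid out per OTG instance. */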
4664 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4666 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4668 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4669 acrtc->crtc_id, enable ? "en" : "dis", rc);
4673 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4675 enum dc_irq_source irq_source;
4676 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4677 struct amdgpu_device *adev = crtc->dev->dev_private;
4678 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4682 /* vblank irq on -> Only need vupdate irq in vrr mode */
4683 if (amdgpu_dm_vrr_active(acrtc_state))
4684 rc = dm_set_vupdate_irq(crtc, true);
4686 /* vblank irq off -> vupdate irq off */
4687 rc = dm_set_vupdate_irq(crtc, false);
4693 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4694 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4697 static int dm_enable_vblank(struct drm_crtc *crtc)
4699 return dm_set_vblank(crtc, true);
4702 static void dm_disable_vblank(struct drm_crtc *crtc)
4704 dm_set_vblank(crtc, false);
/* Implement only the options currently available for the driver */
4708 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4709 .reset = dm_crtc_reset_state,
4710 .destroy = amdgpu_dm_crtc_destroy,
4711 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4712 .set_config = drm_atomic_helper_set_config,
4713 .page_flip = drm_atomic_helper_page_flip,
4714 .atomic_duplicate_state = dm_crtc_duplicate_state,
4715 .atomic_destroy_state = dm_crtc_destroy_state,
4716 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4717 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4718 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4719 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4720 .enable_vblank = dm_enable_vblank,
4721 .disable_vblank = dm_disable_vblank,
4722 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4725 static enum drm_connector_status
4726 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4729 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4733 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
4738 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4739 !aconnector->fake_enable)
4740 connected = (aconnector->dc_sink != NULL);
4742 connected = (aconnector->base.force == DRM_FORCE_ON);
4744 return (connected ? connector_status_connected :
4745 connector_status_disconnected);
4748 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4749 struct drm_connector_state *connector_state,
4750 struct drm_property *property,
4753 struct drm_device *dev = connector->dev;
4754 struct amdgpu_device *adev = dev->dev_private;
4755 struct dm_connector_state *dm_old_state =
4756 to_dm_connector_state(connector->state);
4757 struct dm_connector_state *dm_new_state =
4758 to_dm_connector_state(connector_state);
4762 if (property == dev->mode_config.scaling_mode_property) {
4763 enum amdgpu_rmx_type rmx_type;
4766 case DRM_MODE_SCALE_CENTER:
4767 rmx_type = RMX_CENTER;
4769 case DRM_MODE_SCALE_ASPECT:
4770 rmx_type = RMX_ASPECT;
4772 case DRM_MODE_SCALE_FULLSCREEN:
4773 rmx_type = RMX_FULL;
4775 case DRM_MODE_SCALE_NONE:
4781 if (dm_old_state->scaling == rmx_type)
4784 dm_new_state->scaling = rmx_type;
4786 } else if (property == adev->mode_info.underscan_hborder_property) {
4787 dm_new_state->underscan_hborder = val;
4789 } else if (property == adev->mode_info.underscan_vborder_property) {
4790 dm_new_state->underscan_vborder = val;
4792 } else if (property == adev->mode_info.underscan_property) {
4793 dm_new_state->underscan_enable = val;
4795 } else if (property == adev->mode_info.abm_level_property) {
4796 dm_new_state->abm_level = val;
4803 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4804 const struct drm_connector_state *state,
4805 struct drm_property *property,
4808 struct drm_device *dev = connector->dev;
4809 struct amdgpu_device *adev = dev->dev_private;
4810 struct dm_connector_state *dm_state =
4811 to_dm_connector_state(state);
4814 if (property == dev->mode_config.scaling_mode_property) {
4815 switch (dm_state->scaling) {
4817 *val = DRM_MODE_SCALE_CENTER;
4820 *val = DRM_MODE_SCALE_ASPECT;
4823 *val = DRM_MODE_SCALE_FULLSCREEN;
4827 *val = DRM_MODE_SCALE_NONE;
4831 } else if (property == adev->mode_info.underscan_hborder_property) {
4832 *val = dm_state->underscan_hborder;
4834 } else if (property == adev->mode_info.underscan_vborder_property) {
4835 *val = dm_state->underscan_vborder;
4837 } else if (property == adev->mode_info.underscan_property) {
4838 *val = dm_state->underscan_enable;
4840 } else if (property == adev->mode_info.abm_level_property) {
4841 *val = dm_state->abm_level;
4848 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4850 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4852 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4855 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4857 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4858 const struct dc_link *link = aconnector->dc_link;
4859 struct amdgpu_device *adev = connector->dev->dev_private;
4860 struct amdgpu_display_manager *dm = &adev->dm;
4862 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4863 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4865 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4866 link->type != dc_connection_none &&
4867 dm->backlight_dev) {
4868 backlight_device_unregister(dm->backlight_dev);
4869 dm->backlight_dev = NULL;
4873 if (aconnector->dc_em_sink)
4874 dc_sink_release(aconnector->dc_em_sink);
4875 aconnector->dc_em_sink = NULL;
4876 if (aconnector->dc_sink)
4877 dc_sink_release(aconnector->dc_sink);
4878 aconnector->dc_sink = NULL;
4880 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4881 drm_connector_unregister(connector);
4882 drm_connector_cleanup(connector);
4883 if (aconnector->i2c) {
4884 i2c_del_adapter(&aconnector->i2c->base);
4885 kfree(aconnector->i2c);
4887 kfree(aconnector->dm_dp_aux.aux.name);
4892 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4894 struct dm_connector_state *state =
4895 to_dm_connector_state(connector->state);
4897 if (connector->state)
4898 __drm_atomic_helper_connector_destroy_state(connector->state);
4902 state = kzalloc(sizeof(*state), GFP_KERNEL);
4905 state->scaling = RMX_OFF;
4906 state->underscan_enable = false;
4907 state->underscan_hborder = 0;
4908 state->underscan_vborder = 0;
4909 state->base.max_requested_bpc = 8;
4910 state->vcpi_slots = 0;
4912 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4913 state->abm_level = amdgpu_dm_abm_level;
4915 __drm_atomic_helper_connector_reset(connector, &state->base);
4919 struct drm_connector_state *
4920 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4922 struct dm_connector_state *state =
4923 to_dm_connector_state(connector->state);
4925 struct dm_connector_state *new_state =
4926 kmemdup(state, sizeof(*state), GFP_KERNEL);
4931 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4933 new_state->freesync_capable = state->freesync_capable;
4934 new_state->abm_level = state->abm_level;
4935 new_state->scaling = state->scaling;
4936 new_state->underscan_enable = state->underscan_enable;
4937 new_state->underscan_hborder = state->underscan_hborder;
4938 new_state->underscan_vborder = state->underscan_vborder;
4939 new_state->vcpi_slots = state->vcpi_slots;
4940 new_state->pbn = state->pbn;
4941 return &new_state->base;
4945 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4947 struct amdgpu_dm_connector *amdgpu_dm_connector =
4948 to_amdgpu_dm_connector(connector);
4951 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4952 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4953 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4954 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4959 #if defined(CONFIG_DEBUG_FS)
4960 connector_debugfs_init(amdgpu_dm_connector);
4966 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4967 .reset = amdgpu_dm_connector_funcs_reset,
4968 .detect = amdgpu_dm_connector_detect,
4969 .fill_modes = drm_helper_probe_single_connector_modes,
4970 .destroy = amdgpu_dm_connector_destroy,
4971 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4972 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4973 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4974 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4975 .late_register = amdgpu_dm_connector_late_register,
4976 .early_unregister = amdgpu_dm_connector_unregister
4979 static int get_modes(struct drm_connector *connector)
4981 return amdgpu_dm_connector_get_modes(connector);
4984 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4986 struct dc_sink_init_data init_params = {
4987 .link = aconnector->dc_link,
4988 .sink_signal = SIGNAL_TYPE_VIRTUAL
4992 if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4994 aconnector->base.name);
4996 aconnector->base.force = DRM_FORCE_OFF;
4997 aconnector->base.override_edid = false;
5001 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5003 aconnector->edid = edid;
5005 aconnector->dc_em_sink = dc_link_add_remote_sink(
5006 aconnector->dc_link,
5008 (edid->extensions + 1) * EDID_LENGTH,
5011 if (aconnector->base.force == DRM_FORCE_ON) {
5012 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5013 aconnector->dc_link->local_sink :
5014 aconnector->dc_em_sink;
5015 dc_sink_retain(aconnector->dc_sink);
5019 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5021 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
	 * In case of a headless boot with force-on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset
5027 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5028 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5029 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5033 aconnector->base.override_edid = true;
5034 create_eml_sink(aconnector);
5037 static struct dc_stream_state *
5038 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5039 const struct drm_display_mode *drm_mode,
5040 const struct dm_connector_state *dm_state,
5041 const struct dc_stream_state *old_stream)
5043 struct drm_connector *connector = &aconnector->base;
5044 struct amdgpu_device *adev = connector->dev->dev_private;
5045 struct dc_stream_state *stream;
5046 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5047 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5048 enum dc_status dc_result = DC_OK;
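	/*
	 * Build and validate the stream, dropping the requested bpc in steps
	 * of two (e.g. 10 -> 8 -> 6) until DC accepts the timing or no depths
	 * are left to try.
	 */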
5051 stream = create_stream_for_sink(aconnector, drm_mode,
5052 dm_state, old_stream,
5054 if (stream == NULL) {
5055 DRM_ERROR("Failed to create stream for sink!\n");
5059 dc_result = dc_validate_stream(adev->dm.dc, stream);
5061 if (dc_result != DC_OK) {
5062 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5067 dc_status_to_str(dc_result));
5069 dc_stream_release(stream);
5071 requested_bpc -= 2; /* lower bpc to retry validation */
5074 } while (stream == NULL && requested_bpc >= 6);
5079 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5080 struct drm_display_mode *mode)
5082 int result = MODE_ERROR;
5083 struct dc_sink *dc_sink;
5084 /* TODO: Unhardcode stream count */
5085 struct dc_stream_state *stream;
5086 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5088 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5089 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
	 * Only run this the first time mode_valid is called to initialize
5096 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5097 !aconnector->dc_em_sink)
5098 handle_edid_mgmt(aconnector);
5100 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5102 if (dc_sink == NULL) {
5103 DRM_ERROR("dc_sink is NULL!\n");
5107 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5109 dc_stream_release(stream);
	/* TODO: error handling */
5118 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5119 struct dc_info_packet *out)
5121 struct hdmi_drm_infoframe frame;
5122 unsigned char buf[30]; /* 26 + 4 */
5126 memset(out, 0, sizeof(*out));
5128 if (!state->hdr_output_metadata)
5131 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5135 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5139 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5143 /* Prepare the infopacket for DC. */
5144 switch (state->connector->connector_type) {
5145 case DRM_MODE_CONNECTOR_HDMIA:
5146 out->hb0 = 0x87; /* type */
5147 out->hb1 = 0x01; /* version */
5148 out->hb2 = 0x1A; /* length */
5149 out->sb[0] = buf[3]; /* checksum */
5153 case DRM_MODE_CONNECTOR_DisplayPort:
5154 case DRM_MODE_CONNECTOR_eDP:
5155 out->hb0 = 0x00; /* sdp id, zero */
5156 out->hb1 = 0x87; /* type */
5157 out->hb2 = 0x1D; /* payload len - 1 */
5158 out->hb3 = (0x13 << 2); /* sdp version */
5159 out->sb[0] = 0x01; /* version */
5160 out->sb[1] = 0x1A; /* length */
5168 memcpy(&out->sb[i], &buf[4], 26);
5171 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5172 sizeof(out->sb), false);
5178 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5179 const struct drm_connector_state *new_state)
5181 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5182 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
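	/*
	 * The metadata is considered different if exactly one blob exists,
	 * the blob sizes differ, or the payloads compare unequal.
	 */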
5184 if (old_blob != new_blob) {
5185 if (old_blob && new_blob &&
5186 old_blob->length == new_blob->length)
5187 return memcmp(old_blob->data, new_blob->data,
5197 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5198 struct drm_atomic_state *state)
5200 struct drm_connector_state *new_con_state =
5201 drm_atomic_get_new_connector_state(state, conn);
5202 struct drm_connector_state *old_con_state =
5203 drm_atomic_get_old_connector_state(state, conn);
5204 struct drm_crtc *crtc = new_con_state->crtc;
5205 struct drm_crtc_state *new_crtc_state;
5211 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5212 struct dc_info_packet hdr_infopacket;
5214 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5218 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5219 if (IS_ERR(new_crtc_state))
5220 return PTR_ERR(new_crtc_state);
5223 * DC considers the stream backends changed if the
5224 * static metadata changes. Forcing the modeset also
5225 * gives a simple way for userspace to switch from
5226 * 8bpc to 10bpc when setting the metadata to enter
5229 * Changing the static metadata after it's been
5230 * set is permissible, however. So only force a
5231 * modeset if we're entering or exiting HDR.
5233 new_crtc_state->mode_changed =
5234 !old_con_state->hdr_output_metadata ||
5235 !new_con_state->hdr_output_metadata;
5241 static const struct drm_connector_helper_funcs
5242 amdgpu_dm_connector_helper_funcs = {
	 * If a second, larger display is hotplugged in fbcon mode, its
	 * higher-resolution modes are filtered out by drm_mode_validate_size()
	 * and end up missing after the user starts lightdm. So we need to renew
	 * the mode list in the get_modes callback, not just return its count
5249 .get_modes = get_modes,
5250 .mode_valid = amdgpu_dm_connector_mode_valid,
5251 .atomic_check = amdgpu_dm_connector_atomic_check,
5254 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5258 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5260 struct drm_device *dev = new_crtc_state->crtc->dev;
5261 struct drm_plane *plane;
5263 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5264 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5271 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5273 struct drm_atomic_state *state = new_crtc_state->state;
5274 struct drm_plane *plane;
5277 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5278 struct drm_plane_state *new_plane_state;
5280 /* Cursor planes are "fake". */
5281 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5284 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5286 if (!new_plane_state) {
			 * The plane is enabled on the CRTC and hasn't changed
5289 * state. This means that it previously passed
5290 * validation and is therefore enabled.
5296 /* We need a framebuffer to be considered enabled. */
5297 num_active += (new_plane_state->fb != NULL);
5304 * Sets whether interrupts should be enabled on a specific CRTC.
5305 * We require that the stream be enabled and that there exist active
5306 * DC planes on the stream.
5309 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5310 struct drm_crtc_state *new_crtc_state)
5312 struct dm_crtc_state *dm_new_crtc_state =
5313 to_dm_crtc_state(new_crtc_state);
5315 dm_new_crtc_state->active_planes = 0;
5316 dm_new_crtc_state->interrupts_enabled = false;
5318 if (!dm_new_crtc_state->stream)
5321 dm_new_crtc_state->active_planes =
5322 count_crtc_active_planes(new_crtc_state);
5324 dm_new_crtc_state->interrupts_enabled =
5325 dm_new_crtc_state->active_planes > 0;
5328 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5329 struct drm_crtc_state *state)
5331 struct amdgpu_device *adev = crtc->dev->dev_private;
5332 struct dc *dc = adev->dm.dc;
5333 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5337 * Update interrupt state for the CRTC. This needs to happen whenever
5338 * the CRTC has changed or whenever any of its planes have changed.
5339 * Atomic check satisfies both of these requirements since the CRTC
5340 * is added to the state by DRM during drm_atomic_helper_check_planes.
5342 dm_update_crtc_interrupt_state(crtc, state);
5344 if (unlikely(!dm_crtc_state->stream &&
5345 modeset_required(state, NULL, dm_crtc_state->stream))) {
5350 /* In some use cases, like reset, no stream is attached */
5351 if (!dm_crtc_state->stream)
5355 * We want at least one hardware plane enabled to use
5356 * the stream with a cursor enabled.
5358 if (state->enable && state->active &&
5359 does_crtc_have_active_cursor(state) &&
5360 dm_crtc_state->active_planes == 0)
5363 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5369 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5370 const struct drm_display_mode *mode,
5371 struct drm_display_mode *adjusted_mode)
5376 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5377 .disable = dm_crtc_helper_disable,
5378 .atomic_check = dm_crtc_helper_atomic_check,
5379 .mode_fixup = dm_crtc_helper_mode_fixup,
5380 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5383 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5388 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5390 switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
5409 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5410 struct drm_crtc_state *crtc_state,
5411 struct drm_connector_state *conn_state)
5413 struct drm_atomic_state *state = crtc_state->state;
5414 struct drm_connector *connector = conn_state->connector;
5415 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5416 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5417 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5418 struct drm_dp_mst_topology_mgr *mst_mgr;
5419 struct drm_dp_mst_port *mst_port;
5420 enum dc_color_depth color_depth;
5422 bool is_y420 = false;
5424 if (!aconnector->port || !aconnector->dc_sink)
5427 mst_port = aconnector->port;
5428 mst_mgr = &aconnector->mst_port->mst_mgr;
5430 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5433 if (!state->duplicated) {
5434 int max_bpc = conn_state->max_requested_bpc;
5435 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5436 aconnector->force_yuv420_output;
5437 color_depth = convert_color_depth_from_display_info(connector,
5440 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5441 clock = adjusted_mode->clock;
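		/*
		 * PBN expresses MST payload bandwidth in 54/64 MBps units;
		 * drm_dp_calc_pbn_mode() also folds in the DP spec's 1.006
		 * overhead margin. 'false': bpp is in whole bits per pixel.
		 */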
5442 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5444 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5447 dm_new_connector_state->pbn,
5448 dm_mst_get_pbn_divider(aconnector->dc_link));
5449 if (dm_new_connector_state->vcpi_slots < 0) {
5450 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5451 return dm_new_connector_state->vcpi_slots;
5456 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5457 .disable = dm_encoder_helper_disable,
5458 .atomic_check = dm_encoder_helper_atomic_check
5461 #if defined(CONFIG_DRM_AMD_DC_DCN)
5462 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5463 struct dc_state *dc_state)
5465 struct dc_stream_state *stream = NULL;
5466 struct drm_connector *connector;
5467 struct drm_connector_state *new_con_state, *old_con_state;
5468 struct amdgpu_dm_connector *aconnector;
5469 struct dm_connector_state *dm_conn_state;
5470 int i, j, clock, bpp;
5471 int vcpi, pbn_div, pbn = 0;
5473 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5475 aconnector = to_amdgpu_dm_connector(connector);
5477 if (!aconnector->port)
5480 if (!new_con_state || !new_con_state->crtc)
5483 dm_conn_state = to_dm_connector_state(new_con_state);
5485 for (j = 0; j < dc_state->stream_count; j++) {
5486 stream = dc_state->streams[j];
5490 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5499 if (stream->timing.flags.DSC != 1) {
5500 drm_dp_mst_atomic_enable_dsc(state,
5508 pbn_div = dm_mst_get_pbn_divider(stream->link);
5509 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5510 clock = stream->timing.pix_clk_100hz / 10;
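		/*
		 * pix_clk_100hz / 10 converts back to kHz, and
		 * dsc_cfg.bits_per_pixel is in 1/16-bpp units, which the
		 * 'true' DSC argument tells drm_dp_calc_pbn_mode() to scale.
		 */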
5511 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5512 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5519 dm_conn_state->pbn = pbn;
5520 dm_conn_state->vcpi_slots = vcpi;
5526 static void dm_drm_plane_reset(struct drm_plane *plane)
5528 struct dm_plane_state *amdgpu_state = NULL;
5531 plane->funcs->atomic_destroy_state(plane, plane->state);
5533 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5534 WARN_ON(amdgpu_state == NULL);
5537 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5540 static struct drm_plane_state *
5541 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5543 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5545 old_dm_plane_state = to_dm_plane_state(plane->state);
5546 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5547 if (!dm_plane_state)
5550 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5552 if (old_dm_plane_state->dc_state) {
5553 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5554 dc_plane_state_retain(dm_plane_state->dc_state);
5557 return &dm_plane_state->base;
5560 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5561 struct drm_plane_state *state)
5563 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5565 if (dm_plane_state->dc_state)
5566 dc_plane_state_release(dm_plane_state->dc_state);
5568 drm_atomic_helper_plane_destroy_state(plane, state);
5571 static const struct drm_plane_funcs dm_plane_funcs = {
5572 .update_plane = drm_atomic_helper_update_plane,
5573 .disable_plane = drm_atomic_helper_disable_plane,
5574 .destroy = drm_primary_helper_destroy,
5575 .reset = dm_drm_plane_reset,
5576 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5577 .atomic_destroy_state = dm_drm_plane_destroy_state,
5580 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5581 struct drm_plane_state *new_state)
5583 struct amdgpu_framebuffer *afb;
5584 struct drm_gem_object *obj;
5585 struct amdgpu_device *adev;
5586 struct amdgpu_bo *rbo;
5587 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5588 struct list_head list;
5589 struct ttm_validate_buffer tv;
5590 struct ww_acquire_ctx ticket;
5591 uint64_t tiling_flags;
5594 bool tmz_surface = false;
5595 bool force_disable_dcc = false;
5597 dm_plane_state_old = to_dm_plane_state(plane->state);
5598 dm_plane_state_new = to_dm_plane_state(new_state);
5600 if (!new_state->fb) {
5601 DRM_DEBUG_DRIVER("No FB bound\n");
5605 afb = to_amdgpu_framebuffer(new_state->fb);
5606 obj = new_state->fb->obj[0];
5607 rbo = gem_to_amdgpu_bo(obj);
5608 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5609 INIT_LIST_HEAD(&list);
5613 list_add(&tv.head, &list);
5615 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5617 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5621 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5622 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5624 domain = AMDGPU_GEM_DOMAIN_VRAM;
5626 r = amdgpu_bo_pin(rbo, domain);
5627 if (unlikely(r != 0)) {
5628 if (r != -ERESTARTSYS)
5629 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5630 ttm_eu_backoff_reservation(&ticket, &list);
5634 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5635 if (unlikely(r != 0)) {
5636 amdgpu_bo_unpin(rbo);
5637 ttm_eu_backoff_reservation(&ticket, &list);
5638 DRM_ERROR("%p bind failed\n", rbo);
5642 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5644 tmz_surface = amdgpu_bo_encrypted(rbo);
5646 ttm_eu_backoff_reservation(&ticket, &list);
5648 afb->address = amdgpu_bo_gpu_offset(rbo);
5652 if (dm_plane_state_new->dc_state &&
5653 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5654 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5656 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5657 fill_plane_buffer_attributes(
5658 adev, afb, plane_state->format, plane_state->rotation,
5659 tiling_flags, &plane_state->tiling_info,
5660 &plane_state->plane_size, &plane_state->dcc,
5661 &plane_state->address, tmz_surface,
5668 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5669 struct drm_plane_state *old_state)
5671 struct amdgpu_bo *rbo;
5677 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5678 r = amdgpu_bo_reserve(rbo, false);
5680 DRM_ERROR("failed to reserve rbo before unpin\n");
5684 amdgpu_bo_unpin(rbo);
5685 amdgpu_bo_unreserve(rbo);
5686 amdgpu_bo_unref(&rbo);
5689 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5690 struct drm_crtc_state *new_crtc_state)
5692 int max_downscale = 0;
5693 int max_upscale = INT_MAX;
5695 /* TODO: These should be checked against DC plane caps */
5696 return drm_atomic_helper_check_plane_state(
5697 state, new_crtc_state, max_downscale, max_upscale, true, true);
5700 static int dm_plane_atomic_check(struct drm_plane *plane,
5701 struct drm_plane_state *state)
5703 struct amdgpu_device *adev = plane->dev->dev_private;
5704 struct dc *dc = adev->dm.dc;
5705 struct dm_plane_state *dm_plane_state;
5706 struct dc_scaling_info scaling_info;
5707 struct drm_crtc_state *new_crtc_state;
5710 dm_plane_state = to_dm_plane_state(state);
5712 if (!dm_plane_state->dc_state)
5716 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5717 if (!new_crtc_state)
5720 ret = dm_plane_helper_check_state(state, new_crtc_state);
5724 ret = fill_dc_scaling_info(state, &scaling_info);
5728 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5734 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5735 struct drm_plane_state *new_plane_state)
5737 /* Only support async updates on cursor planes. */
5738 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5744 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5745 struct drm_plane_state *new_state)
5747 struct drm_plane_state *old_state =
5748 drm_atomic_get_old_plane_state(new_state->state, plane);
5750 swap(plane->state->fb, new_state->fb);
5752 plane->state->src_x = new_state->src_x;
5753 plane->state->src_y = new_state->src_y;
5754 plane->state->src_w = new_state->src_w;
5755 plane->state->src_h = new_state->src_h;
5756 plane->state->crtc_x = new_state->crtc_x;
5757 plane->state->crtc_y = new_state->crtc_y;
5758 plane->state->crtc_w = new_state->crtc_w;
5759 plane->state->crtc_h = new_state->crtc_h;
5761 handle_cursor_update(plane, old_state);
5764 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5765 .prepare_fb = dm_plane_helper_prepare_fb,
5766 .cleanup_fb = dm_plane_helper_cleanup_fb,
5767 .atomic_check = dm_plane_atomic_check,
5768 .atomic_async_check = dm_plane_atomic_async_check,
5769 .atomic_async_update = dm_plane_atomic_async_update
 * TODO: These are currently initialized to RGB formats only. For future use
 * cases we should either initialize them dynamically based on plane
 * capabilities, or initialize this array to all formats so the internal DRM
 * check succeeds, and let DC implement the proper check.
5778 static const uint32_t rgb_formats[] = {
5779 DRM_FORMAT_XRGB8888,
5780 DRM_FORMAT_ARGB8888,
5781 DRM_FORMAT_RGBA8888,
5782 DRM_FORMAT_XRGB2101010,
5783 DRM_FORMAT_XBGR2101010,
5784 DRM_FORMAT_ARGB2101010,
5785 DRM_FORMAT_ABGR2101010,
5786 DRM_FORMAT_XBGR8888,
5787 DRM_FORMAT_ABGR8888,
5791 static const uint32_t overlay_formats[] = {
5792 DRM_FORMAT_XRGB8888,
5793 DRM_FORMAT_ARGB8888,
5794 DRM_FORMAT_RGBA8888,
5795 DRM_FORMAT_XBGR8888,
5796 DRM_FORMAT_ABGR8888,
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
5804 static int get_plane_formats(const struct drm_plane *plane,
5805 const struct dc_plane_cap *plane_cap,
5806 uint32_t *formats, int max_formats)
5808 int i, num_formats = 0;
5811 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the caps list.
5816 switch (plane->type) {
5817 case DRM_PLANE_TYPE_PRIMARY:
5818 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5819 if (num_formats >= max_formats)
5822 formats[num_formats++] = rgb_formats[i];
5825 if (plane_cap && plane_cap->pixel_format_support.nv12)
5826 formats[num_formats++] = DRM_FORMAT_NV12;
5827 if (plane_cap && plane_cap->pixel_format_support.p010)
5828 formats[num_formats++] = DRM_FORMAT_P010;
5829 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5830 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5831 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5832 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5833 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5837 case DRM_PLANE_TYPE_OVERLAY:
5838 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5839 if (num_formats >= max_formats)
5842 formats[num_formats++] = overlay_formats[i];
5846 case DRM_PLANE_TYPE_CURSOR:
5847 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5848 if (num_formats >= max_formats)
5851 formats[num_formats++] = cursor_formats[i];
5859 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5860 struct drm_plane *plane,
5861 unsigned long possible_crtcs,
5862 const struct dc_plane_cap *plane_cap)
5864 uint32_t formats[32];
5868 num_formats = get_plane_formats(plane, plane_cap, formats,
5869 ARRAY_SIZE(formats));
5871 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5872 &dm_plane_funcs, formats, num_formats,
5873 NULL, plane->type, NULL);
5877 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5878 plane_cap && plane_cap->per_pixel_alpha) {
5879 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5880 BIT(DRM_MODE_BLEND_PREMULTI);
5882 drm_plane_create_alpha_property(plane);
5883 drm_plane_create_blend_mode_property(plane, blend_caps);
5886 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5888 (plane_cap->pixel_format_support.nv12 ||
5889 plane_cap->pixel_format_support.p010)) {
5890 /* This only affects YUV formats. */
5891 drm_plane_create_color_properties(
5893 BIT(DRM_COLOR_YCBCR_BT601) |
5894 BIT(DRM_COLOR_YCBCR_BT709) |
5895 BIT(DRM_COLOR_YCBCR_BT2020),
5896 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5897 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5898 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5901 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5903 /* Create (reset) the plane state */
5904 if (plane->funcs->reset)
5905 plane->funcs->reset(plane);
5910 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5911 struct drm_plane *plane,
5912 uint32_t crtc_index)
5914 struct amdgpu_crtc *acrtc = NULL;
5915 struct drm_plane *cursor_plane;
5919 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5923 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5924 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5926 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5930 res = drm_crtc_init_with_planes(
5935 &amdgpu_dm_crtc_funcs, NULL);
5940 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5942 /* Create (reset) the plane state */
5943 if (acrtc->base.funcs->reset)
5944 acrtc->base.funcs->reset(&acrtc->base);
5946 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5947 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5949 acrtc->crtc_id = crtc_index;
5950 acrtc->base.enabled = false;
5951 acrtc->otg_inst = -1;
5953 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5954 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5955 true, MAX_COLOR_LUT_ENTRIES);
5956 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5962 kfree(cursor_plane);
5967 static int to_drm_connector_type(enum signal_type st)
5970 case SIGNAL_TYPE_HDMI_TYPE_A:
5971 return DRM_MODE_CONNECTOR_HDMIA;
5972 case SIGNAL_TYPE_EDP:
5973 return DRM_MODE_CONNECTOR_eDP;
5974 case SIGNAL_TYPE_LVDS:
5975 return DRM_MODE_CONNECTOR_LVDS;
5976 case SIGNAL_TYPE_RGB:
5977 return DRM_MODE_CONNECTOR_VGA;
5978 case SIGNAL_TYPE_DISPLAY_PORT:
5979 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5980 return DRM_MODE_CONNECTOR_DisplayPort;
5981 case SIGNAL_TYPE_DVI_DUAL_LINK:
5982 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5983 return DRM_MODE_CONNECTOR_DVID;
5984 case SIGNAL_TYPE_VIRTUAL:
5985 return DRM_MODE_CONNECTOR_VIRTUAL;
5988 return DRM_MODE_CONNECTOR_Unknown;
5992 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5994 struct drm_encoder *encoder;
5996 /* There is only one encoder per connector */
5997 drm_connector_for_each_possible_encoder(connector, encoder)
6003 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6005 struct drm_encoder *encoder;
6006 struct amdgpu_encoder *amdgpu_encoder;
6008 encoder = amdgpu_dm_connector_to_encoder(connector);
6010 if (encoder == NULL)
6013 amdgpu_encoder = to_amdgpu_encoder(encoder);
6015 amdgpu_encoder->native_mode.clock = 0;
6017 if (!list_empty(&connector->probed_modes)) {
6018 struct drm_display_mode *preferred_mode = NULL;
6020 list_for_each_entry(preferred_mode,
6021 &connector->probed_modes,
6023 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6024 amdgpu_encoder->native_mode = *preferred_mode;
6032 static struct drm_display_mode *
6033 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6035 int hdisplay, int vdisplay)
6037 struct drm_device *dev = encoder->dev;
6038 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6039 struct drm_display_mode *mode = NULL;
6040 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6042 mode = drm_mode_duplicate(dev, native_mode);
6047 mode->hdisplay = hdisplay;
6048 mode->vdisplay = vdisplay;
6049 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6050 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6056 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6057 struct drm_connector *connector)
6059 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6060 struct drm_display_mode *mode = NULL;
6061 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6062 struct amdgpu_dm_connector *amdgpu_dm_connector =
6063 to_amdgpu_dm_connector(connector);
6067 char name[DRM_DISPLAY_MODE_LEN];
6070 } common_modes[] = {
6071 { "640x480", 640, 480},
6072 { "800x600", 800, 600},
6073 { "1024x768", 1024, 768},
6074 { "1280x720", 1280, 720},
6075 { "1280x800", 1280, 800},
6076 {"1280x1024", 1280, 1024},
6077 { "1440x900", 1440, 900},
6078 {"1680x1050", 1680, 1050},
6079 {"1600x1200", 1600, 1200},
6080 {"1920x1080", 1920, 1080},
6081 {"1920x1200", 1920, 1200}
6084 n = ARRAY_SIZE(common_modes);
6086 for (i = 0; i < n; i++) {
6087 struct drm_display_mode *curmode = NULL;
6088 bool mode_existed = false;
6090 if (common_modes[i].w > native_mode->hdisplay ||
6091 common_modes[i].h > native_mode->vdisplay ||
6092 (common_modes[i].w == native_mode->hdisplay &&
6093 common_modes[i].h == native_mode->vdisplay))
6096 list_for_each_entry(curmode, &connector->probed_modes, head) {
6097 if (common_modes[i].w == curmode->hdisplay &&
6098 common_modes[i].h == curmode->vdisplay) {
6099 mode_existed = true;
6107 mode = amdgpu_dm_create_common_mode(encoder,
6108 common_modes[i].name, common_modes[i].w,
6110 drm_mode_probed_add(connector, mode);
6111 amdgpu_dm_connector->num_modes++;
6115 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6118 struct amdgpu_dm_connector *amdgpu_dm_connector =
6119 to_amdgpu_dm_connector(connector);
6122 /* empty probed_modes */
6123 INIT_LIST_HEAD(&connector->probed_modes);
6124 amdgpu_dm_connector->num_modes =
6125 drm_add_edid_modes(connector, edid);
	/* Sort the probed modes before calling amdgpu_dm_get_native_mode(),
	 * since an EDID can have more than one preferred mode. Modes later in
	 * the probed mode list could be of a higher, preferred resolution: for
	 * example, 3840x2160 in the base EDID preferred timing and 4096x2160
	 * as the preferred resolution in a later DID extension block.
	 */
6135 drm_mode_sort(&connector->probed_modes);
6136 amdgpu_dm_get_native_mode(connector);
6138 amdgpu_dm_connector->num_modes = 0;
6142 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6144 struct amdgpu_dm_connector *amdgpu_dm_connector =
6145 to_amdgpu_dm_connector(connector);
6146 struct drm_encoder *encoder;
6147 struct edid *edid = amdgpu_dm_connector->edid;
6149 encoder = amdgpu_dm_connector_to_encoder(connector);
6151 if (!edid || !drm_edid_is_valid(edid)) {
6152 amdgpu_dm_connector->num_modes =
6153 drm_add_modes_noedid(connector, 640, 480);
6155 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6156 amdgpu_dm_connector_add_common_modes(encoder, connector);
6158 amdgpu_dm_fbc_init(connector);
6160 return amdgpu_dm_connector->num_modes;
6163 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6164 struct amdgpu_dm_connector *aconnector,
6166 struct dc_link *link,
6169 struct amdgpu_device *adev = dm->ddev->dev_private;
6172 * Some of the properties below require access to state, like bpc.
6173 * Allocate some default initial connector state with our reset helper.
6175 if (aconnector->base.funcs->reset)
6176 aconnector->base.funcs->reset(&aconnector->base);
6178 aconnector->connector_id = link_index;
6179 aconnector->dc_link = link;
6180 aconnector->base.interlace_allowed = false;
6181 aconnector->base.doublescan_allowed = false;
6182 aconnector->base.stereo_allowed = false;
6183 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6184 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6185 aconnector->audio_inst = -1;
6186 mutex_init(&aconnector->hpd_lock);
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported
6192 switch (connector_type) {
6193 case DRM_MODE_CONNECTOR_HDMIA:
6194 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6195 aconnector->base.ycbcr_420_allowed =
6196 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6198 case DRM_MODE_CONNECTOR_DisplayPort:
6199 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6200 aconnector->base.ycbcr_420_allowed =
6201 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6203 case DRM_MODE_CONNECTOR_DVID:
6204 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6210 drm_object_attach_property(&aconnector->base.base,
6211 dm->ddev->mode_config.scaling_mode_property,
6212 DRM_MODE_SCALE_NONE);
6214 drm_object_attach_property(&aconnector->base.base,
6215 adev->mode_info.underscan_property,
6217 drm_object_attach_property(&aconnector->base.base,
6218 adev->mode_info.underscan_hborder_property,
6220 drm_object_attach_property(&aconnector->base.base,
6221 adev->mode_info.underscan_vborder_property,
6224 if (!aconnector->mst_port)
6225 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
6228 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6229 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6231 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6232 dc_is_dmcu_initialized(adev->dm.dc)) {
6233 drm_object_attach_property(&aconnector->base.base,
6234 adev->mode_info.abm_level_property, 0);
6237 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6238 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6239 connector_type == DRM_MODE_CONNECTOR_eDP) {
6240 drm_object_attach_property(
6241 &aconnector->base.base,
6242 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6244 if (!aconnector->mst_port)
6245 drm_connector_attach_vrr_capable_property(&aconnector->base);
6247 #ifdef CONFIG_DRM_AMD_DC_HDCP
6248 if (adev->dm.hdcp_workqueue)
6249 drm_connector_attach_content_protection_property(&aconnector->base, true);
6254 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6255 struct i2c_msg *msgs, int num)
6257 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6258 struct ddc_service *ddc_service = i2c->ddc_service;
6259 struct i2c_command cmd;
6263 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6268 cmd.number_of_payloads = num;
6269 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6272 for (i = 0; i < num; i++) {
6273 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6274 cmd.payloads[i].address = msgs[i].addr;
6275 cmd.payloads[i].length = msgs[i].len;
6276 cmd.payloads[i].data = msgs[i].buf;
6280 ddc_service->ctx->dc,
6281 ddc_service->ddc_pin->hw_info.ddc_channel,
6285 kfree(cmd.payloads);
6289 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6291 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6294 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6295 .master_xfer = amdgpu_dm_i2c_xfer,
6296 .functionality = amdgpu_dm_i2c_func,
6299 static struct amdgpu_i2c_adapter *
6300 create_i2c(struct ddc_service *ddc_service,
6304 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6305 struct amdgpu_i2c_adapter *i2c;
6307 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6310 i2c->base.owner = THIS_MODULE;
6311 i2c->base.class = I2C_CLASS_DDC;
6312 i2c->base.dev.parent = &adev->pdev->dev;
6313 i2c->base.algo = &amdgpu_dm_i2c_algo;
6314 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6315 i2c_set_adapdata(&i2c->base, i2c);
6316 i2c->ddc_service = ddc_service;
6317 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6324 * Note: this function assumes that dc_link_detect() was called for the
6325 * dc_link which will be represented by this aconnector.
6327 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6328 struct amdgpu_dm_connector *aconnector,
6329 uint32_t link_index,
6330 struct amdgpu_encoder *aencoder)
6334 struct dc *dc = dm->dc;
6335 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6336 struct amdgpu_i2c_adapter *i2c;
6338 link->priv = aconnector;
6340 DRM_DEBUG_DRIVER("%s()\n", __func__);
6342 i2c = create_i2c(link->ddc, link->link_index, &res);
6344 DRM_ERROR("Failed to create i2c adapter data\n");
6348 aconnector->i2c = i2c;
6349 res = i2c_add_adapter(&i2c->base);
6352 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6356 connector_type = to_drm_connector_type(link->connector_signal);
6358 res = drm_connector_init_with_ddc(
6361 &amdgpu_dm_connector_funcs,
6366 DRM_ERROR("connector_init failed\n");
6367 aconnector->connector_id = -1;
6371 drm_connector_helper_add(
6373 &amdgpu_dm_connector_helper_funcs);
6375 amdgpu_dm_connector_init_helper(
6382 drm_connector_attach_encoder(
6383 &aconnector->base, &aencoder->base);
6385 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6386 || connector_type == DRM_MODE_CONNECTOR_eDP)
6387 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6392 aconnector->i2c = NULL;
6397 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6399 switch (adev->mode_info.num_crtc) {
6416 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6417 struct amdgpu_encoder *aencoder,
6418 uint32_t link_index)
6420 struct amdgpu_device *adev = dev->dev_private;
6422 int res = drm_encoder_init(dev,
6424 &amdgpu_dm_encoder_funcs,
6425 DRM_MODE_ENCODER_TMDS,
6428 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6431 aencoder->encoder_id = link_index;
6433 aencoder->encoder_id = -1;
6435 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6440 static void manage_dm_interrupts(struct amdgpu_device *adev,
6441 struct amdgpu_crtc *acrtc,
6445 * this is not a correct translation, but it will work as long as the
6446 * VBLANK irq type constant is the same as PFLIP's
6449 amdgpu_display_crtc_idx_to_irq_type(
6454 drm_crtc_vblank_on(&acrtc->base);
6457 &adev->pageflip_irq,
6463 &adev->pageflip_irq,
6465 drm_crtc_vblank_off(&acrtc->base);
6470 is_scaling_state_different(const struct dm_connector_state *dm_state,
6471 const struct dm_connector_state *old_dm_state)
6473 if (dm_state->scaling != old_dm_state->scaling)
6475 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6476 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6478 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6479 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6481 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6482 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6487 #ifdef CONFIG_DRM_AMD_DC_HDCP
6488 static bool is_content_protection_different(struct drm_connector_state *state,
6489 const struct drm_connector_state *old_state,
6490 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6492 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6494 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6495 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6496 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6500 /* CP is being re-enabled, ignore this */
6501 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6502 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6503 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6507 /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6508 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6509 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6510 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6512 /* Check if something is connected/enabled; otherwise we would start HDCP
6513 * with nothing connected/enabled (hot-plug, headless S3, DPMS)
6515 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6516 aconnector->dc_sink != NULL)
6519 if (old_state->content_protection == state->content_protection)
6522 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
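/*
 * For reference: the content_protection property is split between userspace
 * and kernel. Userspace may only request DESIRED or UNDESIRED, while ENABLED
 * is set by the driver once HDCP authentication actually succeeds - which is
 * why the checks above downgrade ENABLED back to DESIRED whenever a
 * re-authentication will be required.
 */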
6529 static void remove_stream(struct amdgpu_device *adev,
6530 struct amdgpu_crtc *acrtc,
6531 struct dc_stream_state *stream)
6533 /* this is the update mode case */
6535 acrtc->otg_inst = -1;
6536 acrtc->enabled = false;
6539 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6540 struct dc_cursor_position *position)
6542 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6544 int xorigin = 0, yorigin = 0;
6546 position->enable = false;
6550 if (!crtc || !plane->state->fb)
6553 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6554 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6555 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6557 plane->state->crtc_w,
6558 plane->state->crtc_h);
6562 x = plane->state->crtc_x;
6563 y = plane->state->crtc_y;
6565 if (x <= -amdgpu_crtc->max_cursor_width ||
6566 y <= -amdgpu_crtc->max_cursor_height)
6570 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6574 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6577 position->enable = true;
6578 position->translate_by_source = true;
6581 position->x_hotspot = xorigin;
6582 position->y_hotspot = yorigin;
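/*
 * Worked example of the clamping above: with max_cursor_width == 128, a
 * cursor placed at x == -10 yields xorigin = min(10, 127) = 10, so the
 * hotspot is shifted 10 pixels into the cursor image while the on-screen
 * position stays clamped at the left edge. A cursor that is entirely
 * off-screen (x <= -max_cursor_width) is left disabled by the early return.
 */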
6587 static void handle_cursor_update(struct drm_plane *plane,
6588 struct drm_plane_state *old_plane_state)
6590 struct amdgpu_device *adev = plane->dev->dev_private;
6591 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6592 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6593 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6594 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6595 uint64_t address = afb ? afb->address : 0;
6596 struct dc_cursor_position position;
6597 struct dc_cursor_attributes attributes;
6600 if (!plane->state->fb && !old_plane_state->fb)
6603 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6605 amdgpu_crtc->crtc_id,
6606 plane->state->crtc_w,
6607 plane->state->crtc_h);
6609 ret = get_cursor_position(plane, crtc, &position);
6613 if (!position.enable) {
6614 /* turn off cursor */
6615 if (crtc_state && crtc_state->stream) {
6616 mutex_lock(&adev->dm.dc_lock);
6617 dc_stream_set_cursor_position(crtc_state->stream,
6619 mutex_unlock(&adev->dm.dc_lock);
6624 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6625 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6627 memset(&attributes, 0, sizeof(attributes));
6628 attributes.address.high_part = upper_32_bits(address);
6629 attributes.address.low_part = lower_32_bits(address);
6630 attributes.width = plane->state->crtc_w;
6631 attributes.height = plane->state->crtc_h;
6632 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6633 attributes.rotation_angle = 0;
6634 attributes.attribute_flags.value = 0;
6636 attributes.pitch = attributes.width;
6638 if (crtc_state->stream) {
6639 mutex_lock(&adev->dm.dc_lock);
6640 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6642 DRM_ERROR("DC failed to set cursor attributes\n");
6644 if (!dc_stream_set_cursor_position(crtc_state->stream,
6646 DRM_ERROR("DC failed to set cursor position\n");
6647 mutex_unlock(&adev->dm.dc_lock);
6651 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6654 assert_spin_locked(&acrtc->base.dev->event_lock);
6655 WARN_ON(acrtc->event);
6657 acrtc->event = acrtc->base.state->event;
6659 /* Set the flip status */
6660 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6662 /* Mark this event as consumed */
6663 acrtc->base.state->event = NULL;
6665 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6669 static void update_freesync_state_on_stream(
6670 struct amdgpu_display_manager *dm,
6671 struct dm_crtc_state *new_crtc_state,
6672 struct dc_stream_state *new_stream,
6673 struct dc_plane_state *surface,
6674 u32 flip_timestamp_in_us)
6676 struct mod_vrr_params vrr_params;
6677 struct dc_info_packet vrr_infopacket = {0};
6678 struct amdgpu_device *adev = dm->adev;
6679 unsigned long flags;
6685 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6686 * For now it's sufficient to just guard against these conditions.
6689 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6692 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6693 vrr_params = new_crtc_state->vrr_params;
6696 mod_freesync_handle_preflip(
6697 dm->freesync_module,
6700 flip_timestamp_in_us,
6703 if (adev->family < AMDGPU_FAMILY_AI &&
6704 amdgpu_dm_vrr_active(new_crtc_state)) {
6705 mod_freesync_handle_v_update(dm->freesync_module,
6706 new_stream, &vrr_params);
6708 /* Need to call this before the frame ends. */
6709 dc_stream_adjust_vmin_vmax(dm->dc,
6710 new_crtc_state->stream,
6711 &vrr_params.adjust);
6715 mod_freesync_build_vrr_infopacket(
6716 dm->freesync_module,
6720 TRANSFER_FUNC_UNKNOWN,
6723 new_crtc_state->freesync_timing_changed |=
6724 (memcmp(&new_crtc_state->vrr_params.adjust,
6726 sizeof(vrr_params.adjust)) != 0);
6728 new_crtc_state->freesync_vrr_info_changed |=
6729 (memcmp(&new_crtc_state->vrr_infopacket,
6731 sizeof(vrr_infopacket)) != 0);
6733 new_crtc_state->vrr_params = vrr_params;
6734 new_crtc_state->vrr_infopacket = vrr_infopacket;
6736 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6737 new_stream->vrr_infopacket = vrr_infopacket;
6739 if (new_crtc_state->freesync_vrr_info_changed)
6740 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6741 new_crtc_state->base.crtc->base.id,
6742 (int)new_crtc_state->base.vrr_enabled,
6743 (int)vrr_params.state);
6745 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
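/*
 * Everything above runs under event_lock because vrr_params is also updated
 * from the vblank/pflip interrupt path. The memcmp-based *_changed flags are
 * what later tell the commit path whether a fresh VRR infopacket actually
 * needs to be pushed to the hardware.
 */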
6748 static void pre_update_freesync_state_on_stream(
6749 struct amdgpu_display_manager *dm,
6750 struct dm_crtc_state *new_crtc_state)
6752 struct dc_stream_state *new_stream = new_crtc_state->stream;
6753 struct mod_vrr_params vrr_params;
6754 struct mod_freesync_config config = new_crtc_state->freesync_config;
6755 struct amdgpu_device *adev = dm->adev;
6756 unsigned long flags;
6762 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6763 * For now it's sufficient to just guard against these conditions.
6765 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6768 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6769 vrr_params = new_crtc_state->vrr_params;
6771 if (new_crtc_state->vrr_supported &&
6772 config.min_refresh_in_uhz &&
6773 config.max_refresh_in_uhz) {
6774 config.state = new_crtc_state->base.vrr_enabled ?
6775 VRR_STATE_ACTIVE_VARIABLE :
6778 config.state = VRR_STATE_UNSUPPORTED;
6781 mod_freesync_build_vrr_params(dm->freesync_module,
6783 &config, &vrr_params);
6785 new_crtc_state->freesync_timing_changed |=
6786 (memcmp(&new_crtc_state->vrr_params.adjust,
6788 sizeof(vrr_params.adjust)) != 0);
6790 new_crtc_state->vrr_params = vrr_params;
6791 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6794 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6795 struct dm_crtc_state *new_state)
6797 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6798 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6800 if (!old_vrr_active && new_vrr_active) {
6801 /* Transition VRR inactive -> active:
6802 * While VRR is active, we must not disable vblank irq, as a
6803 * reenable after disable would compute bogus vblank/pflip
6804 * timestamps if the reenable happened inside the display front porch.
6806 * We also need the vupdate irq for the actual core vblank handling at the end of vblank.
6809 dm_set_vupdate_irq(new_state->base.crtc, true);
6810 drm_crtc_vblank_get(new_state->base.crtc);
6811 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6812 __func__, new_state->base.crtc->base.id);
6813 } else if (old_vrr_active && !new_vrr_active) {
6814 /* Transition VRR active -> inactive:
6815 * Allow vblank irq disable again for fixed refresh rate.
6817 dm_set_vupdate_irq(new_state->base.crtc, false);
6818 drm_crtc_vblank_put(new_state->base.crtc);
6819 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6820 __func__, new_state->base.crtc->base.id);
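/*
 * Note on the pairing above: drm_crtc_vblank_get() holds a vblank reference
 * so the DRM core cannot disable the vblank irq while VRR is active; the
 * matching drm_crtc_vblank_put() on the active -> inactive transition drops
 * that reference and allows the irq to be disabled again.
 */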
6824 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6826 struct drm_plane *plane;
6827 struct drm_plane_state *old_plane_state, *new_plane_state;
6831 * TODO: Make this per-stream so we don't issue redundant updates for
6832 * commits with multiple streams.
6834 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6836 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6837 handle_cursor_update(plane, old_plane_state);
6840 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6841 struct dc_state *dc_state,
6842 struct drm_device *dev,
6843 struct amdgpu_display_manager *dm,
6844 struct drm_crtc *pcrtc,
6845 bool wait_for_vblank)
6848 uint64_t timestamp_ns;
6849 struct drm_plane *plane;
6850 struct drm_plane_state *old_plane_state, *new_plane_state;
6851 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6852 struct drm_crtc_state *new_pcrtc_state =
6853 drm_atomic_get_new_crtc_state(state, pcrtc);
6854 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6855 struct dm_crtc_state *dm_old_crtc_state =
6856 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6857 int planes_count = 0, vpos, hpos;
6859 unsigned long flags;
6860 struct amdgpu_bo *abo;
6861 uint64_t tiling_flags;
6862 bool tmz_surface = false;
6863 uint32_t target_vblank, last_flip_vblank;
6864 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6865 bool pflip_present = false;
6867 struct dc_surface_update surface_updates[MAX_SURFACES];
6868 struct dc_plane_info plane_infos[MAX_SURFACES];
6869 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6870 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6871 struct dc_stream_update stream_update;
6874 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6877 dm_error("Failed to allocate update bundle\n");
6882 * Disable the cursor first if we're disabling all the planes.
6883 * It'll remain on the screen after the planes are re-enabled if we don't.
6886 if (acrtc_state->active_planes == 0)
6887 amdgpu_dm_commit_cursors(state);
6889 /* update planes when needed */
6890 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6891 struct drm_crtc *crtc = new_plane_state->crtc;
6892 struct drm_crtc_state *new_crtc_state;
6893 struct drm_framebuffer *fb = new_plane_state->fb;
6894 bool plane_needs_flip;
6895 struct dc_plane_state *dc_plane;
6896 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6898 /* Cursor plane is handled after stream updates */
6899 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6902 if (!fb || !crtc || pcrtc != crtc)
6905 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6906 if (!new_crtc_state->active)
6909 dc_plane = dm_new_plane_state->dc_state;
6911 bundle->surface_updates[planes_count].surface = dc_plane;
6912 if (new_pcrtc_state->color_mgmt_changed) {
6913 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6914 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6915 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6918 fill_dc_scaling_info(new_plane_state,
6919 &bundle->scaling_infos[planes_count]);
6921 bundle->surface_updates[planes_count].scaling_info =
6922 &bundle->scaling_infos[planes_count];
6924 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6926 pflip_present = pflip_present || plane_needs_flip;
6928 if (!plane_needs_flip) {
6933 abo = gem_to_amdgpu_bo(fb->obj[0]);
6936 * Wait for all fences on this FB. Do limited wait to avoid
6937 * deadlock during GPU reset when this fence will not signal
6938 * but we hold reservation lock for the BO.
6940 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6942 msecs_to_jiffies(5000));
6943 if (unlikely(r <= 0))
6944 DRM_ERROR("Waiting for fences timed out!");
6947 * TODO: This might fail and hence is better not used; wait
6948 * explicitly on fences instead. In general this should be
6949 * called for blocking commits as well,
6950 * as per the framework helpers.
6952 r = amdgpu_bo_reserve(abo, true);
6953 if (unlikely(r != 0))
6954 DRM_ERROR("failed to reserve buffer before flip\n");
6956 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6958 tmz_surface = amdgpu_bo_encrypted(abo);
6960 amdgpu_bo_unreserve(abo);
6962 fill_dc_plane_info_and_addr(
6963 dm->adev, new_plane_state, tiling_flags,
6964 &bundle->plane_infos[planes_count],
6965 &bundle->flip_addrs[planes_count].address,
6969 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6970 new_plane_state->plane->index,
6971 bundle->plane_infos[planes_count].dcc.enable);
6973 bundle->surface_updates[planes_count].plane_info =
6974 &bundle->plane_infos[planes_count];
6977 * Only allow immediate flips for fast updates that don't
6978 * change FB pitch, DCC state, rotation or mirroring.
6980 bundle->flip_addrs[planes_count].flip_immediate =
6981 crtc->state->async_flip &&
6982 acrtc_state->update_type == UPDATE_TYPE_FAST;
6984 timestamp_ns = ktime_get_ns();
6985 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6986 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6987 bundle->surface_updates[planes_count].surface = dc_plane;
6989 if (!bundle->surface_updates[planes_count].surface) {
6990 DRM_ERROR("No surface for CRTC: id=%d\n",
6991 acrtc_attach->crtc_id);
6995 if (plane == pcrtc->primary)
6996 update_freesync_state_on_stream(
6999 acrtc_state->stream,
7001 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7003 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7005 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7006 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7012 if (pflip_present) {
7014 /* Use old throttling in non-vrr fixed refresh rate mode
7015 * to keep flip scheduling based on target vblank counts
7016 * working in a backwards compatible way, e.g., for
7017 * clients using the GLX_OML_sync_control extension or
7018 * DRI3/Present extension with defined target_msc.
7020 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7023 /* For variable refresh rate mode only:
7024 * Get vblank of last completed flip to avoid > 1 vrr
7025 * flips per video frame by use of throttling, but allow
7026 * flip programming anywhere in the possibly large
7027 * variable vrr vblank interval for fine-grained flip
7028 * timing control and more opportunity to avoid stutter
7029 * on late submission of flips.
7031 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7032 last_flip_vblank = acrtc_attach->last_flip_vblank;
7033 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7036 target_vblank = last_flip_vblank + wait_for_vblank;
7039 * Wait until we're out of the vertical blank period before the one
7040 * targeted by the flip
7042 while ((acrtc_attach->enabled &&
7043 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7044 0, &vpos, &hpos, NULL,
7045 NULL, &pcrtc->hwmode)
7046 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7047 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7048 (int)(target_vblank -
7049 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7050 usleep_range(1000, 1100);
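/*
 * Throttling arithmetic, illustrated: wait_for_vblank is a bool, so
 * target_vblank is either the reference vblank count itself (async flips)
 * or one frame after it. The loop above then sleeps in ~1 ms steps until
 * scanout has left the vblank preceding the target, limiting us to at most
 * one flip completion per (possibly VRR-stretched) frame.
 */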
7053 if (acrtc_attach->base.state->event) {
7054 drm_crtc_vblank_get(pcrtc);
7056 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7058 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7059 prepare_flip_isr(acrtc_attach);
7061 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7064 if (acrtc_state->stream) {
7065 if (acrtc_state->freesync_vrr_info_changed)
7066 bundle->stream_update.vrr_infopacket =
7067 &acrtc_state->stream->vrr_infopacket;
7071 /* Update the planes if changed or disable if we don't have any. */
7072 if ((planes_count || acrtc_state->active_planes == 0) &&
7073 acrtc_state->stream) {
7074 bundle->stream_update.stream = acrtc_state->stream;
7075 if (new_pcrtc_state->mode_changed) {
7076 bundle->stream_update.src = acrtc_state->stream->src;
7077 bundle->stream_update.dst = acrtc_state->stream->dst;
7080 if (new_pcrtc_state->color_mgmt_changed) {
7082 * TODO: This isn't fully correct since we've actually
7083 * already modified the stream in place.
7085 bundle->stream_update.gamut_remap =
7086 &acrtc_state->stream->gamut_remap_matrix;
7087 bundle->stream_update.output_csc_transform =
7088 &acrtc_state->stream->csc_color_matrix;
7089 bundle->stream_update.out_transfer_func =
7090 acrtc_state->stream->out_transfer_func;
7093 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7094 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7095 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7098 * If FreeSync state on the stream has changed then we need to
7099 * re-adjust the min/max bounds now that DC doesn't handle this
7100 * as part of commit.
7102 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7103 amdgpu_dm_vrr_active(acrtc_state)) {
7104 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7105 dc_stream_adjust_vmin_vmax(
7106 dm->dc, acrtc_state->stream,
7107 &acrtc_state->vrr_params.adjust);
7108 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7110 mutex_lock(&dm->dc_lock);
7111 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7112 acrtc_state->stream->link->psr_settings.psr_allow_active)
7113 amdgpu_dm_psr_disable(acrtc_state->stream);
7115 dc_commit_updates_for_stream(dm->dc,
7116 bundle->surface_updates,
7118 acrtc_state->stream,
7119 &bundle->stream_update,
7122 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7123 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7124 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7125 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7126 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7127 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7128 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7129 amdgpu_dm_psr_enable(acrtc_state->stream);
7132 mutex_unlock(&dm->dc_lock);
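/*
 * PSR policy applied above, summarized: a full (> UPDATE_TYPE_FAST) update
 * first forces PSR off before programming, then performs one-time PSR setup
 * on links that support it but haven't enabled the feature yet; fast updates
 * re-enter PSR when the feature is enabled and not currently active.
 */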
7136 * Update cursor state *after* programming all the planes.
7137 * This avoids redundant programming in the case where we're going
7138 * to be disabling a single plane - those pipes are being disabled.
7140 if (acrtc_state->active_planes)
7141 amdgpu_dm_commit_cursors(state);
7147 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7148 struct drm_atomic_state *state)
7150 struct amdgpu_device *adev = dev->dev_private;
7151 struct amdgpu_dm_connector *aconnector;
7152 struct drm_connector *connector;
7153 struct drm_connector_state *old_con_state, *new_con_state;
7154 struct drm_crtc_state *new_crtc_state;
7155 struct dm_crtc_state *new_dm_crtc_state;
7156 const struct dc_stream_status *status;
7159 /* Notify device removals. */
7160 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7161 if (old_con_state->crtc != new_con_state->crtc) {
7162 /* CRTC changes require notification. */
7166 if (!new_con_state->crtc)
7169 new_crtc_state = drm_atomic_get_new_crtc_state(
7170 state, new_con_state->crtc);
7172 if (!new_crtc_state)
7175 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7179 aconnector = to_amdgpu_dm_connector(connector);
7181 mutex_lock(&adev->dm.audio_lock);
7182 inst = aconnector->audio_inst;
7183 aconnector->audio_inst = -1;
7184 mutex_unlock(&adev->dm.audio_lock);
7186 amdgpu_dm_audio_eld_notify(adev, inst);
7189 /* Notify audio device additions. */
7190 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7191 if (!new_con_state->crtc)
7194 new_crtc_state = drm_atomic_get_new_crtc_state(
7195 state, new_con_state->crtc);
7197 if (!new_crtc_state)
7200 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7203 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7204 if (!new_dm_crtc_state->stream)
7207 status = dc_stream_get_status(new_dm_crtc_state->stream);
7211 aconnector = to_amdgpu_dm_connector(connector);
7213 mutex_lock(&adev->dm.audio_lock);
7214 inst = status->audio_inst;
7215 aconnector->audio_inst = inst;
7216 mutex_unlock(&adev->dm.audio_lock);
7218 amdgpu_dm_audio_eld_notify(adev, inst);
7223 * Enable interrupts on CRTCs that are newly active, have undergone
7224 * a modeset, or have active planes again.
7226 * Done in two passes, based on the for_modeset flag:
7227 * Pass 1: For CRTCs going through modeset
7228 * Pass 2: For CRTCs going from 0 to n active planes
7230 * Interrupts can only be enabled after the planes are programmed,
7231 * so this requires a two-pass approach since we don't want to
7232 * just defer the interrupts until after commit planes every time.
7234 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
7235 struct drm_atomic_state *state,
7238 struct amdgpu_device *adev = dev->dev_private;
7239 struct drm_crtc *crtc;
7240 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7242 #ifdef CONFIG_DEBUG_FS
7243 enum amdgpu_dm_pipe_crc_source source;
7246 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7247 new_crtc_state, i) {
7248 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7249 struct dm_crtc_state *dm_new_crtc_state =
7250 to_dm_crtc_state(new_crtc_state);
7251 struct dm_crtc_state *dm_old_crtc_state =
7252 to_dm_crtc_state(old_crtc_state);
7253 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7256 run_pass = (for_modeset && modeset) ||
7257 (!for_modeset && !modeset &&
7258 !dm_old_crtc_state->interrupts_enabled);
7263 if (!dm_new_crtc_state->interrupts_enabled)
7266 manage_dm_interrupts(adev, acrtc, true);
7268 #ifdef CONFIG_DEBUG_FS
7269 /* The stream has changed so CRC capture needs to be re-enabled. */
7270 source = dm_new_crtc_state->crc_src;
7271 if (amdgpu_dm_is_valid_crc_source(source)) {
7272 amdgpu_dm_crtc_configure_crc_source(
7273 crtc, dm_new_crtc_state,
7274 dm_new_crtc_state->crc_src);
7281 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7282 * @crtc_state: the DRM CRTC state
7283 * @stream_state: the DC stream state.
7285 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7286 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7288 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7289 struct dc_stream_state *stream_state)
7291 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7294 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7295 struct drm_atomic_state *state,
7298 struct drm_crtc *crtc;
7299 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7300 struct amdgpu_device *adev = dev->dev_private;
7304 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7305 * a modeset, being disabled, or have no active planes.
7307 * It's done in atomic commit rather than commit tail for now since
7308 * some of these interrupt handlers access the current CRTC state and
7309 * potentially the stream pointer itself.
7311 * Since the atomic state is swapped within atomic commit and not within
7312 * commit tail, this would lead to the new state (that hasn't been committed yet)
7313 * being accessed from within the handlers.
7315 * TODO: Fix this so we can do this in commit tail and not have to block
7318 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7319 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7320 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7321 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7323 if (dm_old_crtc_state->interrupts_enabled &&
7324 (!dm_new_crtc_state->interrupts_enabled ||
7325 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7326 manage_dm_interrupts(adev, acrtc, false);
7329 * Add a check here for SoCs that support a hardware cursor plane, to
7330 * unset legacy_cursor_update
7333 return drm_atomic_helper_commit(dev, state, nonblock);
7335 /* TODO: Handle EINTR, reenable IRQ */
7339 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7340 * @state: The atomic state to commit
7342 * This will tell DC to commit the constructed DC state from atomic_check,
7343 * programming the hardware. Any failure here implies a hardware failure, since
7344 * atomic check should have filtered anything non-kosher.
7346 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7348 struct drm_device *dev = state->dev;
7349 struct amdgpu_device *adev = dev->dev_private;
7350 struct amdgpu_display_manager *dm = &adev->dm;
7351 struct dm_atomic_state *dm_state;
7352 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7354 struct drm_crtc *crtc;
7355 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7356 unsigned long flags;
7357 bool wait_for_vblank = true;
7358 struct drm_connector *connector;
7359 struct drm_connector_state *old_con_state, *new_con_state;
7360 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7361 int crtc_disable_count = 0;
7363 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7365 dm_state = dm_atomic_get_new_state(state);
7366 if (dm_state && dm_state->context) {
7367 dc_state = dm_state->context;
7369 /* No state changes, retain current state. */
7370 dc_state_temp = dc_create_state(dm->dc);
7371 ASSERT(dc_state_temp);
7372 dc_state = dc_state_temp;
7373 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7376 /* update changed items */
7377 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7378 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7380 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7381 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7384 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7385 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7386 "connectors_changed:%d\n",
7388 new_crtc_state->enable,
7389 new_crtc_state->active,
7390 new_crtc_state->planes_changed,
7391 new_crtc_state->mode_changed,
7392 new_crtc_state->active_changed,
7393 new_crtc_state->connectors_changed);
7395 /* Copy all transient state flags into dc state */
7396 if (dm_new_crtc_state->stream) {
7397 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7398 dm_new_crtc_state->stream);
7401 /* handles headless hotplug case, updating new_state and
7402 * aconnector as needed
7405 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7407 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7409 if (!dm_new_crtc_state->stream) {
7411 * this could happen because of issues with
7412 * userspace notifications delivery.
7413 * In this case userspace tries to set a mode on
7414 * a display which is in fact disconnected.
7415 * dc_sink is NULL in this case on aconnector.
7416 * We expect a mode reset to come soon.
7418 * This can also happen when an unplug is done
7419 * during the resume sequence.
7421 * In this case, we want to pretend we still
7422 * have a sink to keep the pipe running so that
7423 * hw state is consistent with the sw state
7425 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7426 __func__, acrtc->base.base.id);
7430 if (dm_old_crtc_state->stream)
7431 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7433 pm_runtime_get_noresume(dev->dev);
7435 acrtc->enabled = true;
7436 acrtc->hw_mode = new_crtc_state->mode;
7437 crtc->hwmode = new_crtc_state->mode;
7438 } else if (modereset_required(new_crtc_state)) {
7439 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7440 /* i.e. reset mode */
7441 if (dm_old_crtc_state->stream) {
7442 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7443 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7445 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7448 } /* for_each_crtc_in_state() */
7451 dm_enable_per_frame_crtc_master_sync(dc_state);
7452 mutex_lock(&dm->dc_lock);
7453 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7454 mutex_unlock(&dm->dc_lock);
7457 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7458 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7460 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7462 if (dm_new_crtc_state->stream != NULL) {
7463 const struct dc_stream_status *status =
7464 dc_stream_get_status(dm_new_crtc_state->stream);
7467 status = dc_stream_get_status_from_state(dc_state,
7468 dm_new_crtc_state->stream);
7471 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7473 acrtc->otg_inst = status->primary_otg_inst;
7476 #ifdef CONFIG_DRM_AMD_DC_HDCP
7477 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7478 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7479 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7480 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7482 new_crtc_state = NULL;
7485 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7487 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7489 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7490 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7491 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7492 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7496 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7497 hdcp_update_display(
7498 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7499 new_con_state->hdcp_content_type,
7500 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7505 /* Handle connector state changes */
7506 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7507 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7508 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7509 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7510 struct dc_surface_update dummy_updates[MAX_SURFACES];
7511 struct dc_stream_update stream_update;
7512 struct dc_info_packet hdr_packet;
7513 struct dc_stream_status *status = NULL;
7514 bool abm_changed, hdr_changed, scaling_changed;
7516 memset(&dummy_updates, 0, sizeof(dummy_updates));
7517 memset(&stream_update, 0, sizeof(stream_update));
7520 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7521 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7524 /* Skip any modesets/resets */
7525 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7528 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7529 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7531 scaling_changed = is_scaling_state_different(dm_new_con_state,
7534 abm_changed = dm_new_crtc_state->abm_level !=
7535 dm_old_crtc_state->abm_level;
7538 is_hdr_metadata_different(old_con_state, new_con_state);
7540 if (!scaling_changed && !abm_changed && !hdr_changed)
7543 stream_update.stream = dm_new_crtc_state->stream;
7544 if (scaling_changed) {
7545 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7546 dm_new_con_state, dm_new_crtc_state->stream);
7548 stream_update.src = dm_new_crtc_state->stream->src;
7549 stream_update.dst = dm_new_crtc_state->stream->dst;
7553 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7555 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7559 fill_hdr_info_packet(new_con_state, &hdr_packet);
7560 stream_update.hdr_static_metadata = &hdr_packet;
7563 status = dc_stream_get_status(dm_new_crtc_state->stream);
7565 WARN_ON(!status->plane_count);
7568 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7569 * Here we create an empty update on each plane.
7570 * To fix this, DC should permit updating only stream properties.
7572 for (j = 0; j < status->plane_count; j++)
7573 dummy_updates[j].surface = status->plane_states[0];
7576 mutex_lock(&dm->dc_lock);
7577 dc_commit_updates_for_stream(dm->dc,
7579 status->plane_count,
7580 dm_new_crtc_state->stream,
7583 mutex_unlock(&dm->dc_lock);
7586 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7587 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7588 new_crtc_state, i) {
7589 if (old_crtc_state->active && !new_crtc_state->active)
7590 crtc_disable_count++;
7592 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7593 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7595 /* Update freesync active state. */
7596 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7598 /* Handle vrr on->off / off->on transitions */
7599 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7603 /* Enable interrupts for CRTCs going through a modeset. */
7604 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7606 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7607 if (new_crtc_state->async_flip)
7608 wait_for_vblank = false;
7610 /* update planes when needed per crtc */
7611 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7612 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7614 if (dm_new_crtc_state->stream)
7615 amdgpu_dm_commit_planes(state, dc_state, dev,
7616 dm, crtc, wait_for_vblank);
7619 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7620 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7622 /* Update audio instances for each connector. */
7623 amdgpu_dm_commit_audio(dev, state);
7626 * Send a vblank event for each event not handled in the flip path, and
7627 * mark the events as consumed for drm_atomic_helper_commit_hw_done().
7629 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7630 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7632 if (new_crtc_state->event)
7633 drm_send_event_locked(dev, &new_crtc_state->event->base);
7635 new_crtc_state->event = NULL;
7637 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7639 /* Signal HW programming completion */
7640 drm_atomic_helper_commit_hw_done(state);
7642 if (wait_for_vblank)
7643 drm_atomic_helper_wait_for_flip_done(dev, state);
7645 drm_atomic_helper_cleanup_planes(dev, state);
7648 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7649 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
7652 for (i = 0; i < crtc_disable_count; i++)
7653 pm_runtime_put_autosuspend(dev->dev);
7654 pm_runtime_mark_last_busy(dev->dev);
7657 dc_release_state(dc_state_temp);
7661 static int dm_force_atomic_commit(struct drm_connector *connector)
7664 struct drm_device *ddev = connector->dev;
7665 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7666 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7667 struct drm_plane *plane = disconnected_acrtc->base.primary;
7668 struct drm_connector_state *conn_state;
7669 struct drm_crtc_state *crtc_state;
7670 struct drm_plane_state *plane_state;
7675 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7677 /* Construct an atomic state to restore the previous display settings */
7680 * Attach connectors to drm_atomic_state
7682 conn_state = drm_atomic_get_connector_state(state, connector);
7684 ret = PTR_ERR_OR_ZERO(conn_state);
7688 /* Attach crtc to drm_atomic_state */
7689 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7691 ret = PTR_ERR_OR_ZERO(crtc_state);
7695 /* force a restore */
7696 crtc_state->mode_changed = true;
7698 /* Attach plane to drm_atomic_state */
7699 plane_state = drm_atomic_get_plane_state(state, plane);
7701 ret = PTR_ERR_OR_ZERO(plane_state);
7706 /* Call commit internally with the state we just constructed */
7707 ret = drm_atomic_commit(state);
7712 DRM_ERROR("Restoring old state failed with %i\n", ret);
7713 drm_atomic_state_put(state);
7719 * This function handles all cases where a set mode does not come upon hotplug.
7720 * This includes when a display is unplugged and then plugged back into the
7721 * same port, and when running without usermode desktop manager support.
7723 void dm_restore_drm_connector_state(struct drm_device *dev,
7724 struct drm_connector *connector)
7726 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7727 struct amdgpu_crtc *disconnected_acrtc;
7728 struct dm_crtc_state *acrtc_state;
7730 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7733 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7734 if (!disconnected_acrtc)
7737 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7738 if (!acrtc_state->stream)
7742 * If the previous sink is not released and different from the current,
7743 * we deduce we are in a state where we cannot rely on a usermode call
7744 * to turn on the display, so we do it here.
7746 if (acrtc_state->stream->sink != aconnector->dc_sink)
7747 dm_force_atomic_commit(&aconnector->base);
7751 * Grabs all modesetting locks to serialize against any blocking commits,
7752 * and waits for completion of all non-blocking commits.
7754 static int do_aquire_global_lock(struct drm_device *dev,
7755 struct drm_atomic_state *state)
7757 struct drm_crtc *crtc;
7758 struct drm_crtc_commit *commit;
7762 * Adding all modeset locks to acquire_ctx will
7763 * ensure that when the framework releases it, the
7764 * extra locks we are locking here will get released too.
7766 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7770 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7771 spin_lock(&crtc->commit_lock);
7772 commit = list_first_entry_or_null(&crtc->commit_list,
7773 struct drm_crtc_commit, commit_entry);
7775 drm_crtc_commit_get(commit);
7776 spin_unlock(&crtc->commit_lock);
7782 * Make sure all pending HW programming has completed and page flips are done.
7785 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7788 ret = wait_for_completion_interruptible_timeout(
7789 &commit->flip_done, 10*HZ);
7792 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7793 "timed out\n", crtc->base.id, crtc->name);
7795 drm_crtc_commit_put(commit);
7798 return ret < 0 ? ret : 0;
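/*
 * Note: wait_for_completion_interruptible_timeout() returns a negative errno
 * if interrupted by a signal, 0 on timeout, and the remaining jiffies
 * otherwise. That is why a timeout (ret == 0) only logs an error above,
 * while the final "ret < 0 ? ret : 0" propagates signals and maps every
 * other outcome to success.
 */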
7801 static void get_freesync_config_for_crtc(
7802 struct dm_crtc_state *new_crtc_state,
7803 struct dm_connector_state *new_con_state)
7805 struct mod_freesync_config config = {0};
7806 struct amdgpu_dm_connector *aconnector =
7807 to_amdgpu_dm_connector(new_con_state->base.connector);
7808 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7809 int vrefresh = drm_mode_vrefresh(mode);
7811 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7812 vrefresh >= aconnector->min_vfreq &&
7813 vrefresh <= aconnector->max_vfreq;
7815 if (new_crtc_state->vrr_supported) {
7816 new_crtc_state->stream->ignore_msa_timing_param = true;
7817 config.state = new_crtc_state->base.vrr_enabled ?
7818 VRR_STATE_ACTIVE_VARIABLE :
7820 config.min_refresh_in_uhz =
7821 aconnector->min_vfreq * 1000000;
7822 config.max_refresh_in_uhz =
7823 aconnector->max_vfreq * 1000000;
7824 config.vsif_supported = true;
7828 new_crtc_state->freesync_config = config;
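/*
 * Example of the uhz conversion above: a FreeSync panel reporting a
 * 40-144 Hz range yields config.min_refresh_in_uhz == 40000000 and
 * config.max_refresh_in_uhz == 144000000, and a 60 Hz mode qualifies as
 * vrr_supported since 40 <= 60 <= 144.
 */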
7831 static void reset_freesync_config_for_crtc(
7832 struct dm_crtc_state *new_crtc_state)
7834 new_crtc_state->vrr_supported = false;
7836 memset(&new_crtc_state->vrr_params, 0,
7837 sizeof(new_crtc_state->vrr_params));
7838 memset(&new_crtc_state->vrr_infopacket, 0,
7839 sizeof(new_crtc_state->vrr_infopacket));
7842 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7843 struct drm_atomic_state *state,
7844 struct drm_crtc *crtc,
7845 struct drm_crtc_state *old_crtc_state,
7846 struct drm_crtc_state *new_crtc_state,
7848 bool *lock_and_validation_needed)
7850 struct dm_atomic_state *dm_state = NULL;
7851 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7852 struct dc_stream_state *new_stream;
7856 * TODO: Move this code, which updates the changed items, into
7857 * dm_crtc_atomic_check once we get rid of dc_validation_set.
7859 struct amdgpu_crtc *acrtc = NULL;
7860 struct amdgpu_dm_connector *aconnector = NULL;
7861 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7862 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7866 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7867 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7868 acrtc = to_amdgpu_crtc(crtc);
7869 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7871 /* TODO This hack should go away */
7872 if (aconnector && enable) {
7873 /* Make sure fake sink is created in plug-in scenario */
7874 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7876 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7879 if (IS_ERR(drm_new_conn_state)) {
7880 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7884 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7885 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7887 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7890 new_stream = create_validate_stream_for_sink(aconnector,
7891 &new_crtc_state->mode,
7893 dm_old_crtc_state->stream);
7896 * we can have no stream on ACTION_SET if a display
7897 * was disconnected during S3; in this case it is not an
7898 * error, the OS will be updated after detection and
7899 * will do the right thing on the next atomic commit.
7903 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7904 __func__, acrtc->base.base.id);
7909 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7911 ret = fill_hdr_info_packet(drm_new_conn_state,
7912 &new_stream->hdr_static_metadata);
7917 * If we already removed the old stream from the context
7918 * (and set the new stream to NULL) then we can't reuse
7919 * the old stream even if the stream and scaling are unchanged.
7920 * We'll hit the BUG_ON and black screen.
7922 * TODO: Refactor this function to allow this check to work
7923 * in all conditions.
7925 if (dm_new_crtc_state->stream &&
7926 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7927 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7928 new_crtc_state->mode_changed = false;
7929 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7930 new_crtc_state->mode_changed);
7934 /* mode_changed flag may get updated above, need to check again */
7935 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7939 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7940 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7941 "connectors_changed:%d\n",
7943 new_crtc_state->enable,
7944 new_crtc_state->active,
7945 new_crtc_state->planes_changed,
7946 new_crtc_state->mode_changed,
7947 new_crtc_state->active_changed,
7948 new_crtc_state->connectors_changed);
7950 /* Remove stream for any changed/disabled CRTC */
7953 if (!dm_old_crtc_state->stream)
7956 ret = dm_atomic_get_state(state, &dm_state);
7960 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7963 /* i.e. reset mode */
7964 if (dc_remove_stream_from_ctx(
7967 dm_old_crtc_state->stream) != DC_OK) {
7972 dc_stream_release(dm_old_crtc_state->stream);
7973 dm_new_crtc_state->stream = NULL;
7975 reset_freesync_config_for_crtc(dm_new_crtc_state);
7977 *lock_and_validation_needed = true;
7979 } else {/* Add stream for any updated/enabled CRTC */
7981 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
7982 * added MST connectors are not found in the existing crtc_state in chained mode.
7983 * TODO: need to dig out the root cause of that.
7985 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7988 if (modereset_required(new_crtc_state))
7991 if (modeset_required(new_crtc_state, new_stream,
7992 dm_old_crtc_state->stream)) {
7994 WARN_ON(dm_new_crtc_state->stream);
7996 ret = dm_atomic_get_state(state, &dm_state);
8000 dm_new_crtc_state->stream = new_stream;
8002 dc_stream_retain(new_stream);
8004 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8007 if (dc_add_stream_to_ctx(
8010 dm_new_crtc_state->stream) != DC_OK) {
8015 *lock_and_validation_needed = true;
8020 /* Release extra reference */
8022 dc_stream_release(new_stream);
8025 * We want to do dc stream updates that do not require a
8026 * full modeset below.
8028 if (!(enable && aconnector && new_crtc_state->enable &&
8029 new_crtc_state->active))
8032 * Given above conditions, the dc state cannot be NULL because:
8033 * 1. We're in the process of enabling CRTCs (just been added
8034 * to the dc context, or already is on the context)
8035 * 2. Has a valid connector attached, and
8036 * 3. Is currently active and enabled.
8037 * => The dc stream state currently exists.
8039 BUG_ON(dm_new_crtc_state->stream == NULL);
8041 /* Scaling or underscan settings */
8042 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8043 update_stream_scaling_settings(
8044 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8047 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8050 * Color management settings. We also update color properties
8051 * when a modeset is needed, to ensure it gets reprogrammed.
8053 if (dm_new_crtc_state->base.color_mgmt_changed ||
8054 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8055 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8060 /* Update Freesync settings. */
8061 get_freesync_config_for_crtc(dm_new_crtc_state,
8068 dc_stream_release(new_stream);
8072 static bool should_reset_plane(struct drm_atomic_state *state,
8073 struct drm_plane *plane,
8074 struct drm_plane_state *old_plane_state,
8075 struct drm_plane_state *new_plane_state)
8077 struct drm_plane *other;
8078 struct drm_plane_state *old_other_state, *new_other_state;
8079 struct drm_crtc_state *new_crtc_state;
8083 * TODO: Remove this hack once the checks below are sufficient
8084 * to determine when we need to reset all the planes on the CRTC.
8087 if (state->allow_modeset)
8090 /* Exit early if we know that we're adding or removing the plane. */
8091 if (old_plane_state->crtc != new_plane_state->crtc)
8094 /* old crtc == new_crtc == NULL, plane not in context. */
8095 if (!new_plane_state->crtc)
8099 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8101 if (!new_crtc_state)
8104 /* CRTC Degamma changes currently require us to recreate planes. */
8105 if (new_crtc_state->color_mgmt_changed)
8108 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8112 * If there are any new primary or overlay planes being added or
8113 * removed then the z-order can potentially change. To ensure
8114 * correct z-order and pipe acquisition the current DC architecture
8115 * requires us to remove and recreate all existing planes.
8117 * TODO: Come up with a more elegant solution for this.
8119 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8120 if (other->type == DRM_PLANE_TYPE_CURSOR)
8123 if (old_other_state->crtc != new_plane_state->crtc &&
8124 new_other_state->crtc != new_plane_state->crtc)
8127 if (old_other_state->crtc != new_other_state->crtc)
8130 /* TODO: Remove this once we can handle fast format changes. */
8131 if (old_other_state->fb && new_other_state->fb &&
8132 old_other_state->fb->format != new_other_state->fb->format)
8139 static int dm_update_plane_state(struct dc *dc,
8140 struct drm_atomic_state *state,
8141 struct drm_plane *plane,
8142 struct drm_plane_state *old_plane_state,
8143 struct drm_plane_state *new_plane_state,
8145 bool *lock_and_validation_needed)
8148 struct dm_atomic_state *dm_state = NULL;
8149 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8150 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8151 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8152 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8153 struct amdgpu_crtc *new_acrtc;
8158 new_plane_crtc = new_plane_state->crtc;
8159 old_plane_crtc = old_plane_state->crtc;
8160 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8161 dm_old_plane_state = to_dm_plane_state(old_plane_state);
8163 /* TODO: Implement better atomic check for cursor plane */
8164 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8165 if (!enable || !new_plane_crtc ||
8166 drm_atomic_plane_disabling(plane->state, new_plane_state))
8169 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8171 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8172 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8173 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8174 new_plane_state->crtc_w, new_plane_state->crtc_h);
8181 needs_reset = should_reset_plane(state, plane, old_plane_state,
8184 /* Remove any changed/removed planes */
8189 if (!old_plane_crtc)
8192 old_crtc_state = drm_atomic_get_old_crtc_state(
8193 state, old_plane_crtc);
8194 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8196 if (!dm_old_crtc_state->stream)
8199 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8200 plane->base.id, old_plane_crtc->base.id);
8202 ret = dm_atomic_get_state(state, &dm_state);
8206 if (!dc_remove_plane_from_context(
8208 dm_old_crtc_state->stream,
8209 dm_old_plane_state->dc_state,
8210 dm_state->context)) {
8217 dc_plane_state_release(dm_old_plane_state->dc_state);
8218 dm_new_plane_state->dc_state = NULL;
8220 *lock_and_validation_needed = true;
8222 } else { /* Add new planes */
8223 struct dc_plane_state *dc_new_plane_state;
8225 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8228 if (!new_plane_crtc)
8231 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8232 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8234 if (!dm_new_crtc_state->stream)
8240 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8244 WARN_ON(dm_new_plane_state->dc_state);
8246 dc_new_plane_state = dc_create_plane_state(dc);
8247 if (!dc_new_plane_state)
8250 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8251 plane->base.id, new_plane_crtc->base.id);
8253 ret = fill_dc_plane_attributes(
8254 new_plane_crtc->dev->dev_private,
8259 dc_plane_state_release(dc_new_plane_state);
8263 ret = dm_atomic_get_state(state, &dm_state);
8265 dc_plane_state_release(dc_new_plane_state);
8270 * Any atomic check errors that occur after this will
8271 * not need a release. The plane state will be attached
8272 * to the stream, and therefore part of the atomic
8273 * state. It'll be released when the atomic state is released.
8276 if (!dc_add_plane_to_context(
8278 dm_new_crtc_state->stream,
8280 dm_state->context)) {
8282 dc_plane_state_release(dc_new_plane_state);
8286 dm_new_plane_state->dc_state = dc_new_plane_state;
8288 /* Tell DC to do a full surface update every time there
8289 * is a plane change. Inefficient, but works for now.
8291 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8293 *lock_and_validation_needed = true;
8301 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8302 struct drm_atomic_state *state,
8303 enum surface_update_type *out_type)
8305 struct dc *dc = dm->dc;
8306 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8307 int i, j, num_plane, ret = 0;
8308 struct drm_plane_state *old_plane_state, *new_plane_state;
8309 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8310 struct drm_crtc *new_plane_crtc;
8311 struct drm_plane *plane;
8313 struct drm_crtc *crtc;
8314 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8315 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8316 struct dc_stream_status *status = NULL;
8317 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8318 struct surface_info_bundle {
8319 struct dc_surface_update surface_updates[MAX_SURFACES];
8320 struct dc_plane_info plane_infos[MAX_SURFACES];
8321 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8322 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8323 struct dc_stream_update stream_update;
8326 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8329 DRM_ERROR("Failed to allocate update bundle\n");
8330 /* Set type to FULL to avoid crashing in DC */
8331 update_type = UPDATE_TYPE_FULL;
8335 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8337 memset(bundle, 0, sizeof(struct surface_info_bundle));
8339 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8340 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8343 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8344 update_type = UPDATE_TYPE_FULL;
8348 if (!new_dm_crtc_state->stream)
8351 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8352 const struct amdgpu_framebuffer *amdgpu_fb =
8353 to_amdgpu_framebuffer(new_plane_state->fb);
8354 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8355 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8356 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8357 uint64_t tiling_flags;
8358 bool tmz_surface = false;
8360 new_plane_crtc = new_plane_state->crtc;
8361 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8362 old_dm_plane_state = to_dm_plane_state(old_plane_state);
8364 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8367 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8368 update_type = UPDATE_TYPE_FULL;
8372 if (crtc != new_plane_crtc)

			bundle->surface_updates[num_plane].surface =
					new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
						new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
						new_dm_plane_state->dc_state->in_transfer_func;
				bundle->surface_updates[num_plane].gamut_remap_matrix =
						&new_dm_plane_state->dc_state->gamut_remap_matrix;
				bundle->stream_update.gamut_remap =
						&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
						&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
						new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address, tmz_surface,
					false);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;
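
		/*
		 * With the bundle assembled, let DC judge how invasive this
		 * CRTC's update is relative to the currently committed state.
		 */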
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			break;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
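
/*
 * On MST, enabling DSC on one stream can change the link parameters of every
 * other stream sharing the same topology, so any CRTC driving a sibling MST
 * connector has to be pulled into the atomic state as well.
 */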
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another, acquiring the global lock guarantees that any
 * such full update commit will wait for completion of any outstanding flip
 * using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
	enum dc_status status;
	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset or plane update
	 * which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;
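
	/*
	 * ASICs from Navi10 onward support DSC over MST; a modeset on one
	 * stream may retune DSC for the whole topology, so pull in the
	 * affected sibling CRTCs (see add_affected_mst_dsc_crtcs() above).
	 */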
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}
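
	/*
	 * The four passes below tear down before building up: modified
	 * planes are removed and disabling CRTCs processed first, so that
	 * the resources they release can be reacquired by the CRTCs and
	 * planes being enabled against the new configuration.
	 */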
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper; check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed because a new stream
	 * cannot be committed into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scale or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to
	 * set the global lock. Leaving it in to check if we broke any corner
	 * cases:
	 *	lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 *	lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
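
	/*
	 * Full/medium updates build and validate a new DC context under the
	 * global DM lock; fast updates (the else branch below) keep the
	 * currently committed context instead.
	 */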
	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
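
/*
 * Parse the EDID monitor range descriptor (when the sink is DP/eDP and can
 * ignore MSA timing) and flag the connector as VRR capable when the
 * supported refresh range is wider than 10 Hz.
 */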
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	   (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;
			range	= &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
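
/*
 * Read the sink's PSR capability from DPCD and cache, on the link, whether
 * the PSR feature can be enabled for this eDP panel.
 */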
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable PSR firmware
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR; start from a fail-safe of 2 frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
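
	/*
	 * Worked example: at 60 Hz, frame_time_microsec = 1000000 / 60
	 * = 16666 us, so num_frames_static = 30000 / 16666 + 1 = 2 frames
	 * (integer division), i.e. the screen must stay static for two full
	 * frames before PSR entry is triggered.
	 */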

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable PSR firmware
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{

	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}