/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc/inc/core_types.h"

#include "amdgpu_display.h"
#include "amdgpu_ucode.h"

#include "amdgpu_dm.h"
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

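/*
 * Illustrative request flow (a sketch, not an exhaustive call chain): a
 * userspace atomic commit is validated by amdgpu_dm_atomic_check(), which
 * builds and validates a DC global state, and is then programmed to the
 * hardware by amdgpu_dm_atomic_commit() / amdgpu_dm_atomic_commit_tail()
 * through the DC interface. The exact helpers involved vary by kernel
 * version; the functions declared below are the authoritative flow.
 */
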
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

/**
 * dm_vblank_get_counter
 *
 * Get counter for number of vertical blanks
 *
 * @adev: [in] desired amdgpu device
 * @crtc: [in] which CRTC to get the counter from
 *
 * Return: Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
							amdgpu_crtc->crtc_id);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping only gives valid results
		 * once we are past the front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc_state->stream,
						&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc_state->stream,
						&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* The following must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc_state->stream,
					&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc_state->stream,
					&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	mutex_init(&adev->dm.dc_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/*
	 * TODO debug why this doesn't work on Raven
	 */
	if (adev->flags & AMD_IS_APU &&
	    adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type < CHIP_RAVEN)
		init_data.flags.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	init_data.flags.power_down_display_on_boot = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

#if defined(CONFIG_DEBUG_FS)
	if (dtn_debugfs_init(adev))
		DRM_ERROR("amdgpu: failed to initialize dtn debugfs support.\n");
#endif

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);

	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_RAVEN:
		fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

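	/*
	 * Worked example of the identity ramp above: i = 0 maps to 0x0000,
	 * i = 5 maps to 0xFFFF * 5 / 15 = 0x5555, and i = 15 maps to 0xFFFF,
	 * i.e. 16 evenly spaced points spanning the full 16-bit range.
	 */
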
	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	ret = dmcu_load_iram(dmcu, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

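/*
 * Illustrative usage (a sketch; the actual call sites live in the per-SoC
 * setup code, e.g. soc15.c): the base driver picks up this IP block with a
 * call along the lines of
 *
 *	amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which the amd_ip_funcs hooks above are invoked at the documented
 * lifecycle points.
 */
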
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
					aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * TODO: Temporarily take this mutex so that the hpd interrupt does not
	 * run into a gpio conflict; once an i2c helper is implemented, this
	 * mutex should be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *old_obj_state;
	int i;

	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(old_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev->ddev,
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
		dm->backlight_caps.caps_valid = true;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	uint32_t brightness = bd->props.brightness;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;
	/*
	 * The brightness input is in the range 0-255.
	 * It needs to be rescaled to be between the
	 * requested min and max input signal.
	 *
	 * It also needs to be scaled up by 0x101 to
	 * match the DC interface which has a range of
	 * 0 to 0xffff.
	 */
	brightness =
		brightness
		* 0x101
		* (caps.max_input_signal - caps.min_input_signal)
		/ AMDGPU_MAX_BL_LEVEL
		+ caps.min_input_signal * 0x101;

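	/*
	 * Worked example (illustrative, using the ACPI defaults above):
	 * with min_input_signal = 12, max_input_signal = 255 and a
	 * requested brightness of 255, this evaluates to
	 *   255 * 0x101 * (255 - 12) / 255 + 12 * 0x101
	 *   = 0x101 * 243 + 0x101 * 12 = 0x101 * 255 = 0xFFFF,
	 * i.e. full scale on the 16-bit DC interface.
	 */
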
	if (dc_link_set_backlight_level(dm->backlight_link,
			brightness, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int ret = dc_link_get_backlight_level(dm->backlight_link);

	if (ret == DC_ERROR_UNEXPECTED)
		return bd->props.brightness;
	return ret;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

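	/*
	 * Worked example of the bitmask above (illustrative): plane_id 0
	 * yields possible_crtcs = 0x1 (CRTC 0 only) and plane_id 2 yields
	 * 0x4 (CRTC 2 only), while a plane index at or beyond max_streams,
	 * i.e. an overlay plane, gets 0xff (any CRTC).
	 */
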
	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
				AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	return 0;

fail:
	kfree(aencoder);
	kfree(aconnector);
	for (i = 0; i < dm->dc->caps.max_planes; i++)
		kfree(mode_info->planes[i]);
	return -EINVAL;
}

2227 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2229 drm_mode_config_cleanup(dm->ddev);
2230 drm_atomic_private_obj_fini(&dm->atomic_obj);
2234 /******************************************************************************
2235 * amdgpu_display_funcs functions
2236 *****************************************************************************/
2239 * dm_bandwidth_update - program display watermarks
2241 * @adev: amdgpu_device pointer
2243 * Calculate and program the display watermarks and line buffer allocation.
2245 static void dm_bandwidth_update(struct amdgpu_device *adev)
2247 /* TODO: implement later */
2250 static const struct amdgpu_display_funcs dm_display_funcs = {
2251 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2252 .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
2253 .backlight_set_level = NULL, /* never called for DC */
2254 .backlight_get_level = NULL, /* never called for DC */
2255 .hpd_sense = NULL, /* called unconditionally */
2256 .hpd_set_polarity = NULL, /* called unconditionally */
2257 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
2258 .page_flip_get_scanoutpos =
2259 dm_crtc_get_scanoutpos, /* called unconditionally */
2260 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2261 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
2264 #if defined(CONFIG_DEBUG_KERNEL_DC)
2266 static ssize_t s3_debug_store(struct device *device,
2267 struct device_attribute *attr,
2273 struct pci_dev *pdev = to_pci_dev(device);
2274 struct drm_device *drm_dev = pci_get_drvdata(pdev);
2275 struct amdgpu_device *adev = drm_dev->dev_private;
2277 ret = kstrtoint(buf, 0, &s3_state);
2282 drm_kms_helper_hotplug_event(adev->ddev);
2287 return ret == 0 ? count : 0;
2290 DEVICE_ATTR_WO(s3_debug);
2294 static int dm_early_init(void *handle)
2296 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2298 switch (adev->asic_type) {
2301 adev->mode_info.num_crtc = 6;
2302 adev->mode_info.num_hpd = 6;
2303 adev->mode_info.num_dig = 6;
2306 adev->mode_info.num_crtc = 4;
2307 adev->mode_info.num_hpd = 6;
2308 adev->mode_info.num_dig = 7;
2312 adev->mode_info.num_crtc = 2;
2313 adev->mode_info.num_hpd = 6;
2314 adev->mode_info.num_dig = 6;
2318 adev->mode_info.num_crtc = 6;
2319 adev->mode_info.num_hpd = 6;
2320 adev->mode_info.num_dig = 7;
2323 adev->mode_info.num_crtc = 3;
2324 adev->mode_info.num_hpd = 6;
2325 adev->mode_info.num_dig = 9;
2328 adev->mode_info.num_crtc = 2;
2329 adev->mode_info.num_hpd = 6;
2330 adev->mode_info.num_dig = 9;
2332 case CHIP_POLARIS11:
2333 case CHIP_POLARIS12:
2334 adev->mode_info.num_crtc = 5;
2335 adev->mode_info.num_hpd = 5;
2336 adev->mode_info.num_dig = 5;
2338 case CHIP_POLARIS10:
2340 adev->mode_info.num_crtc = 6;
2341 adev->mode_info.num_hpd = 6;
2342 adev->mode_info.num_dig = 6;
2347 adev->mode_info.num_crtc = 6;
2348 adev->mode_info.num_hpd = 6;
2349 adev->mode_info.num_dig = 6;
2351 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2353 adev->mode_info.num_crtc = 4;
2354 adev->mode_info.num_hpd = 4;
2355 adev->mode_info.num_dig = 4;
2359 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2363 amdgpu_dm_set_irq_funcs(adev);
2365 if (adev->mode_info.funcs == NULL)
2366 adev->mode_info.funcs = &dm_display_funcs;
2369 * Note: Do NOT change adev->audio_endpt_rreg and
2370 * adev->audio_endpt_wreg because they are initialised in
2371 * amdgpu_device_init()
2373 #if defined(CONFIG_DEBUG_KERNEL_DC)
2376 &dev_attr_s3_debug);
2382 static bool modeset_required(struct drm_crtc_state *crtc_state,
2383 struct dc_stream_state *new_stream,
2384 struct dc_stream_state *old_stream)
2386 if (!drm_atomic_crtc_needs_modeset(crtc_state))
2389 if (!crtc_state->enable)
2392 return crtc_state->active;
2395 static bool modereset_required(struct drm_crtc_state *crtc_state)
2397 if (!drm_atomic_crtc_needs_modeset(crtc_state))
2400 return !crtc_state->enable || !crtc_state->active;
2403 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
2405 drm_encoder_cleanup(encoder);
2409 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
2410 .destroy = amdgpu_dm_encoder_destroy,
2414 static int fill_dc_scaling_info(const struct drm_plane_state *state,
2415 struct dc_scaling_info *scaling_info)
2417 int scale_w, scale_h;
2419 memset(scaling_info, 0, sizeof(*scaling_info));
2421 /* Source is 16.16 fixed point; we ignore the fractional part for now. */
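/*
 * Note: the 16.16 encoding means e.g. a 1920 px source width arrives
 * as 1920 << 16 in state->src_w; the >> 16 below keeps the integer part.
 */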
2422 scaling_info->src_rect.x = state->src_x >> 16;
2423 scaling_info->src_rect.y = state->src_y >> 16;
2425 scaling_info->src_rect.width = state->src_w >> 16;
2426 if (scaling_info->src_rect.width == 0)
2429 scaling_info->src_rect.height = state->src_h >> 16;
2430 if (scaling_info->src_rect.height == 0)
2433 scaling_info->dst_rect.x = state->crtc_x;
2434 scaling_info->dst_rect.y = state->crtc_y;
2436 if (state->crtc_w == 0)
2439 scaling_info->dst_rect.width = state->crtc_w;
2441 if (state->crtc_h == 0)
2444 scaling_info->dst_rect.height = state->crtc_h;
2446 /* DRM doesn't specify clipping on destination output. */
2447 scaling_info->clip_rect = scaling_info->dst_rect;
2449 /* TODO: Validate scaling per-format with DC plane caps */
2450 scale_w = scaling_info->dst_rect.width * 1000 /
2451 scaling_info->src_rect.width;
2453 if (scale_w < 250 || scale_w > 16000)
2456 scale_h = scaling_info->dst_rect.height * 1000 /
2457 scaling_info->src_rect.height;
2459 if (scale_h < 250 || scale_h > 16000)
2463 * The "scaling_quality" can be ignored for now, quality = 0 has DC
2464 * assume reasonable defaults based on the format.
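/*
 * The checks above bound the dst/src ratio (in units of 1/1000) to
 * 0.25x..16x; e.g. a 1920-wide source shown at 960 gives scale_w = 500.
 */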
2470 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2471 uint64_t *tiling_flags)
2473 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
2474 int r = amdgpu_bo_reserve(rbo, false);
2477 /* Don't show error message when returning -ERESTARTSYS */
2478 if (r != -ERESTARTSYS)
2479 DRM_ERROR("Unable to reserve buffer: %d\n", r);
2484 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2486 amdgpu_bo_unreserve(rbo);
2491 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
2493 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
2495 return offset ? (address + offset * 256) : 0;
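/*
 * DCC_OFFSET_256B is stored in 256-byte units, so e.g. an encoded
 * offset of 0x40 places the DCC metadata at base address + 0x4000.
 */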
2499 fill_plane_dcc_attributes(struct amdgpu_device *adev,
2500 const struct amdgpu_framebuffer *afb,
2501 const enum surface_pixel_format format,
2502 const enum dc_rotation_angle rotation,
2503 const union plane_size *plane_size,
2504 const union dc_tiling_info *tiling_info,
2505 const uint64_t info,
2506 struct dc_plane_dcc_param *dcc,
2507 struct dc_plane_address *address)
2509 struct dc *dc = adev->dm.dc;
2510 struct dc_dcc_surface_param input;
2511 struct dc_surface_dcc_cap output;
2512 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
2513 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
2514 uint64_t dcc_address;
2516 memset(&input, 0, sizeof(input));
2517 memset(&output, 0, sizeof(output));
2522 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
2525 if (!dc->cap_funcs.get_dcc_compression_cap)
2528 input.format = format;
2529 input.surface_size.width = plane_size->grph.surface_size.width;
2530 input.surface_size.height = plane_size->grph.surface_size.height;
2531 input.swizzle_mode = tiling_info->gfx9.swizzle;
2533 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
2534 input.scan = SCAN_DIRECTION_HORIZONTAL;
2535 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
2536 input.scan = SCAN_DIRECTION_VERTICAL;
2538 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
2541 if (!output.capable)
2544 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
2548 dcc->grph.meta_pitch =
2549 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
2550 dcc->grph.independent_64b_blks = i64b;
2552 dcc_address = get_dcc_address(afb->address, info);
2553 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
2554 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
2560 fill_plane_buffer_attributes(struct amdgpu_device *adev,
2561 const struct amdgpu_framebuffer *afb,
2562 const enum surface_pixel_format format,
2563 const enum dc_rotation_angle rotation,
2564 const uint64_t tiling_flags,
2565 union dc_tiling_info *tiling_info,
2566 union plane_size *plane_size,
2567 struct dc_plane_dcc_param *dcc,
2568 struct dc_plane_address *address)
2570 const struct drm_framebuffer *fb = &afb->base;
2573 memset(tiling_info, 0, sizeof(*tiling_info));
2574 memset(plane_size, 0, sizeof(*plane_size));
2575 memset(dcc, 0, sizeof(*dcc));
2576 memset(address, 0, sizeof(*address));
2578 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2579 plane_size->grph.surface_size.x = 0;
2580 plane_size->grph.surface_size.y = 0;
2581 plane_size->grph.surface_size.width = fb->width;
2582 plane_size->grph.surface_size.height = fb->height;
2583 plane_size->grph.surface_pitch =
2584 fb->pitches[0] / fb->format->cpp[0];
2586 address->type = PLN_ADDR_TYPE_GRAPHICS;
2587 address->grph.addr.low_part = lower_32_bits(afb->address);
2588 address->grph.addr.high_part = upper_32_bits(afb->address);
2590 uint64_t chroma_addr = afb->address + fb->offsets[1];
2592 plane_size->video.luma_size.x = 0;
2593 plane_size->video.luma_size.y = 0;
2594 plane_size->video.luma_size.width = fb->width;
2595 plane_size->video.luma_size.height = fb->height;
2596 plane_size->video.luma_pitch =
2597 fb->pitches[0] / fb->format->cpp[0];
2599 plane_size->video.chroma_size.x = 0;
2600 plane_size->video.chroma_size.y = 0;
2601 /* TODO: set these based on surface format */
2602 plane_size->video.chroma_size.width = fb->width / 2;
2603 plane_size->video.chroma_size.height = fb->height / 2;
2605 plane_size->video.chroma_pitch =
2606 fb->pitches[1] / fb->format->cpp[1];
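/*
 * NV12/NV21 are 4:2:0, so chroma is subsampled by 2 in each axis and
 * the interleaved CbCr plane has cpp = 2; a 1920x1080 surface thus
 * gets a 960x540 chroma rect.
 */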
2608 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2609 address->video_progressive.luma_addr.low_part =
2610 lower_32_bits(afb->address);
2611 address->video_progressive.luma_addr.high_part =
2612 upper_32_bits(afb->address);
2613 address->video_progressive.chroma_addr.low_part =
2614 lower_32_bits(chroma_addr);
2615 address->video_progressive.chroma_addr.high_part =
2616 upper_32_bits(chroma_addr);
2619 /* Fill GFX8 params */
2620 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
2621 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
2623 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2624 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2625 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2626 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2627 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2629 /* XXX fix me for VI */
2630 tiling_info->gfx8.num_banks = num_banks;
2631 tiling_info->gfx8.array_mode =
2632 DC_ARRAY_2D_TILED_THIN1;
2633 tiling_info->gfx8.tile_split = tile_split;
2634 tiling_info->gfx8.bank_width = bankw;
2635 tiling_info->gfx8.bank_height = bankh;
2636 tiling_info->gfx8.tile_aspect = mtaspect;
2637 tiling_info->gfx8.tile_mode =
2638 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
2639 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
2640 == DC_ARRAY_1D_TILED_THIN1) {
2641 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
2644 tiling_info->gfx8.pipe_config =
2645 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2647 if (adev->asic_type == CHIP_VEGA10 ||
2648 adev->asic_type == CHIP_VEGA12 ||
2649 adev->asic_type == CHIP_VEGA20 ||
2650 adev->asic_type == CHIP_RAVEN) {
2651 /* Fill GFX9 params */
2652 tiling_info->gfx9.num_pipes =
2653 adev->gfx.config.gb_addr_config_fields.num_pipes;
2654 tiling_info->gfx9.num_banks =
2655 adev->gfx.config.gb_addr_config_fields.num_banks;
2656 tiling_info->gfx9.pipe_interleave =
2657 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
2658 tiling_info->gfx9.num_shader_engines =
2659 adev->gfx.config.gb_addr_config_fields.num_se;
2660 tiling_info->gfx9.max_compressed_frags =
2661 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
2662 tiling_info->gfx9.num_rb_per_se =
2663 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
2664 tiling_info->gfx9.swizzle =
2665 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2666 tiling_info->gfx9.shaderEnable = 1;
2668 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
2669 plane_size, tiling_info,
2670 tiling_flags, dcc, address);
2679 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
2680 bool *per_pixel_alpha, bool *global_alpha,
2681 int *global_alpha_value)
2683 *per_pixel_alpha = false;
2684 *global_alpha = false;
2685 *global_alpha_value = 0xff;
2687 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
2690 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
2691 static const uint32_t alpha_formats[] = {
2692 DRM_FORMAT_ARGB8888,
2693 DRM_FORMAT_RGBA8888,
2694 DRM_FORMAT_ABGR8888,
2696 uint32_t format = plane_state->fb->format->format;
2699 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
2700 if (format == alpha_formats[i]) {
2701 *per_pixel_alpha = true;
2707 if (plane_state->alpha < 0xffff) {
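/*
 * DRM plane alpha is 16-bit (0xffff = opaque) while DC takes an
 * 8-bit value, so e.g. 50% alpha 0x8000 becomes 0x80 below.
 */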
2708 *global_alpha = true;
2709 *global_alpha_value = plane_state->alpha >> 8;
2714 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
2715 const enum surface_pixel_format format,
2716 enum dc_color_space *color_space)
2720 *color_space = COLOR_SPACE_SRGB;
2722 /* DRM color properties only affect non-RGB formats. */
2723 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
2726 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
2728 switch (plane_state->color_encoding) {
2729 case DRM_COLOR_YCBCR_BT601:
2731 *color_space = COLOR_SPACE_YCBCR601;
2733 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
2736 case DRM_COLOR_YCBCR_BT709:
2738 *color_space = COLOR_SPACE_YCBCR709;
2740 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
2743 case DRM_COLOR_YCBCR_BT2020:
2745 *color_space = COLOR_SPACE_2020_YCBCR;
2758 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
2759 const struct drm_plane_state *plane_state,
2760 const uint64_t tiling_flags,
2761 struct dc_plane_info *plane_info,
2762 struct dc_plane_address *address)
2764 const struct drm_framebuffer *fb = plane_state->fb;
2765 const struct amdgpu_framebuffer *afb =
2766 to_amdgpu_framebuffer(plane_state->fb);
2767 struct drm_format_name_buf format_name;
2770 memset(plane_info, 0, sizeof(*plane_info));
2772 switch (fb->format->format) {
2774 plane_info->format =
2775 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
2777 case DRM_FORMAT_RGB565:
2778 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
2780 case DRM_FORMAT_XRGB8888:
2781 case DRM_FORMAT_ARGB8888:
2782 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
2784 case DRM_FORMAT_XRGB2101010:
2785 case DRM_FORMAT_ARGB2101010:
2786 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
2788 case DRM_FORMAT_XBGR2101010:
2789 case DRM_FORMAT_ABGR2101010:
2790 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
2792 case DRM_FORMAT_XBGR8888:
2793 case DRM_FORMAT_ABGR8888:
2794 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
2796 case DRM_FORMAT_NV21:
2797 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
2799 case DRM_FORMAT_NV12:
2800 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
2804 "Unsupported screen format %s\n",
2805 drm_get_format_name(fb->format->format, &format_name));
2809 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
2810 case DRM_MODE_ROTATE_0:
2811 plane_info->rotation = ROTATION_ANGLE_0;
2813 case DRM_MODE_ROTATE_90:
2814 plane_info->rotation = ROTATION_ANGLE_90;
2816 case DRM_MODE_ROTATE_180:
2817 plane_info->rotation = ROTATION_ANGLE_180;
2819 case DRM_MODE_ROTATE_270:
2820 plane_info->rotation = ROTATION_ANGLE_270;
2823 plane_info->rotation = ROTATION_ANGLE_0;
2827 plane_info->visible = true;
2828 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
2830 ret = fill_plane_color_attributes(plane_state, plane_info->format,
2831 &plane_info->color_space);
2835 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
2836 plane_info->rotation, tiling_flags,
2837 &plane_info->tiling_info,
2838 &plane_info->plane_size,
2839 &plane_info->dcc, address);
2843 fill_blending_from_plane_state(
2844 plane_state, &plane_info->per_pixel_alpha,
2845 &plane_info->global_alpha, &plane_info->global_alpha_value);
2850 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
2851 struct dc_plane_state *dc_plane_state,
2852 struct drm_plane_state *plane_state,
2853 struct drm_crtc_state *crtc_state)
2855 const struct amdgpu_framebuffer *amdgpu_fb =
2856 to_amdgpu_framebuffer(plane_state->fb);
2857 struct dc_scaling_info scaling_info;
2858 struct dc_plane_info plane_info;
2859 uint64_t tiling_flags;
2862 ret = fill_dc_scaling_info(plane_state, &scaling_info);
2866 dc_plane_state->src_rect = scaling_info.src_rect;
2867 dc_plane_state->dst_rect = scaling_info.dst_rect;
2868 dc_plane_state->clip_rect = scaling_info.clip_rect;
2869 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
2871 ret = get_fb_info(amdgpu_fb, &tiling_flags);
2875 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
2877 &dc_plane_state->address);
2881 dc_plane_state->format = plane_info.format;
2882 dc_plane_state->color_space = plane_info.color_space;
2884 dc_plane_state->plane_size = plane_info.plane_size;
2885 dc_plane_state->rotation = plane_info.rotation;
2886 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
2887 dc_plane_state->stereo_format = plane_info.stereo_format;
2888 dc_plane_state->tiling_info = plane_info.tiling_info;
2889 dc_plane_state->visible = plane_info.visible;
2890 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
2891 dc_plane_state->global_alpha = plane_info.global_alpha;
2892 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
2893 dc_plane_state->dcc = plane_info.dcc;
2896 * Always set input transfer function, since plane state is refreshed
2899 ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2901 dc_transfer_func_release(dc_plane_state->in_transfer_func);
2902 dc_plane_state->in_transfer_func = NULL;
2908 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2909 const struct dm_connector_state *dm_state,
2910 struct dc_stream_state *stream)
2912 enum amdgpu_rmx_type rmx_type;
2914 struct rect src = { 0 }; /* viewport in composition space */
2915 struct rect dst = { 0 }; /* stream addressable area */
2917 /* no mode. nothing to be done */
2921 /* Full screen scaling by default */
2922 src.width = mode->hdisplay;
2923 src.height = mode->vdisplay;
2924 dst.width = stream->timing.h_addressable;
2925 dst.height = stream->timing.v_addressable;
2928 rmx_type = dm_state->scaling;
2929 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2930 if (src.width * dst.height <
2931 src.height * dst.width) {
2932 /* height needs less upscaling/more downscaling */
2933 dst.width = src.width *
2934 dst.height / src.height;
2936 /* width needs less upscaling/more downscaling */
2937 dst.height = src.height *
2938 dst.width / src.width;
2940 } else if (rmx_type == RMX_CENTER) {
2944 dst.x = (stream->timing.h_addressable - dst.width) / 2;
2945 dst.y = (stream->timing.v_addressable - dst.height) / 2;
2947 if (dm_state->underscan_enable) {
2948 dst.x += dm_state->underscan_hborder / 2;
2949 dst.y += dm_state->underscan_vborder / 2;
2950 dst.width -= dm_state->underscan_hborder;
2951 dst.height -= dm_state->underscan_vborder;
2958 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2959 dst.x, dst.y, dst.width, dst.height);
2963 static enum dc_color_depth
2964 convert_color_depth_from_display_info(const struct drm_connector *connector)
2966 struct dm_connector_state *dm_conn_state =
2967 to_dm_connector_state(connector->state);
2968 uint32_t bpc = connector->display_info.bpc;
2970 /* TODO: Remove this when there's support for max_bpc in drm */
2971 if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2972 /* Round down to nearest even number. */
2973 bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
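/* e.g. a max_bpc of 11 clamps bpc to 10; an even max_bpc is used as-is. */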
2978 * Temporary workaround: DRM doesn't parse color depth for
2979 * EDID revisions before 1.4
2980 * TODO: Fix edid parsing
2982 return COLOR_DEPTH_888;
2984 return COLOR_DEPTH_666;
2986 return COLOR_DEPTH_888;
2988 return COLOR_DEPTH_101010;
2990 return COLOR_DEPTH_121212;
2992 return COLOR_DEPTH_141414;
2994 return COLOR_DEPTH_161616;
2996 return COLOR_DEPTH_UNDEFINED;
3000 static enum dc_aspect_ratio
3001 get_aspect_ratio(const struct drm_display_mode *mode_in)
3003 /* 1-1 mapping, since both enums follow the HDMI spec. */
3004 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3007 static enum dc_color_space
3008 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3010 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3012 switch (dc_crtc_timing->pixel_encoding) {
3013 case PIXEL_ENCODING_YCBCR422:
3014 case PIXEL_ENCODING_YCBCR444:
3015 case PIXEL_ENCODING_YCBCR420:
3018 * 27030 kHz is the separation point between HDTV and SDTV
3019 * according to the HDMI spec; we use YCbCr709 and YCbCr601 respectively.
3022 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3023 if (dc_crtc_timing->flags.Y_ONLY)
3025 COLOR_SPACE_YCBCR709_LIMITED;
3027 color_space = COLOR_SPACE_YCBCR709;
3029 if (dc_crtc_timing->flags.Y_ONLY)
3031 COLOR_SPACE_YCBCR601_LIMITED;
3033 color_space = COLOR_SPACE_YCBCR601;
3038 case PIXEL_ENCODING_RGB:
3039 color_space = COLOR_SPACE_SRGB;
3050 static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
3052 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3055 timing_out->display_color_depth--;
3058 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
3059 const struct drm_display_info *info)
3062 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3065 normalized_clk = timing_out->pix_clk_100hz / 10;
3066 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3067 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3068 normalized_clk /= 2;
3069 /* Adjust the pixel clock per the HDMI spec based on the colour depth. */
3070 switch (timing_out->display_color_depth) {
3071 case COLOR_DEPTH_101010:
3072 normalized_clk = (normalized_clk * 30) / 24;
3074 case COLOR_DEPTH_121212:
3075 normalized_clk = (normalized_clk * 36) / 24;
3077 case COLOR_DEPTH_161616:
3078 normalized_clk = (normalized_clk * 48) / 24;
3083 if (normalized_clk <= info->max_tmds_clock)
3085 reduce_mode_colour_depth(timing_out);
3087 } while (timing_out->display_color_depth > COLOR_DEPTH_888);
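/*
 * Worked example (illustrative numbers): a 4k60 mode has a ~594000 kHz
 * pixel clock; at 10 bpc the loop computes 594000 * 30 / 24 = 742500,
 * which would exceed a 600000 kHz max_tmds_clock, so the depth is
 * dropped back to 8 bpc and the loop exits.
 */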
3092 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
3093 const struct drm_display_mode *mode_in,
3094 const struct drm_connector *connector,
3095 const struct dc_stream_state *old_stream)
3097 struct dc_crtc_timing *timing_out = &stream->timing;
3098 const struct drm_display_info *info = &connector->display_info;
3100 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
3102 timing_out->h_border_left = 0;
3103 timing_out->h_border_right = 0;
3104 timing_out->v_border_top = 0;
3105 timing_out->v_border_bottom = 0;
3106 /* TODO: un-hardcode */
3107 if (drm_mode_is_420_only(info, mode_in)
3108 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3109 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3110 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3111 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3112 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3114 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3116 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3117 timing_out->display_color_depth = convert_color_depth_from_display_info(
3119 timing_out->scan_type = SCANNING_TYPE_NODATA;
3120 timing_out->hdmi_vic = 0;
3123 timing_out->vic = old_stream->timing.vic;
3124 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3125 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3127 timing_out->vic = drm_match_cea_mode(mode_in);
3128 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3129 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3130 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3131 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3134 timing_out->h_addressable = mode_in->crtc_hdisplay;
3135 timing_out->h_total = mode_in->crtc_htotal;
3136 timing_out->h_sync_width =
3137 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3138 timing_out->h_front_porch =
3139 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3140 timing_out->v_total = mode_in->crtc_vtotal;
3141 timing_out->v_addressable = mode_in->crtc_vdisplay;
3142 timing_out->v_front_porch =
3143 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3144 timing_out->v_sync_width =
3145 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
3146 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
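/* crtc_clock is in kHz; *10 converts to DC's 100 Hz units,
 * e.g. 148500 kHz -> 1485000. */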
3147 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
3149 stream->output_color_space = get_output_color_space(timing_out);
3151 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3152 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
3153 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3154 adjust_colour_depth_from_display_info(timing_out, info);
3157 static void fill_audio_info(struct audio_info *audio_info,
3158 const struct drm_connector *drm_connector,
3159 const struct dc_sink *dc_sink)
3162 int cea_revision = 0;
3163 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3165 audio_info->manufacture_id = edid_caps->manufacturer_id;
3166 audio_info->product_id = edid_caps->product_id;
3168 cea_revision = drm_connector->display_info.cea_rev;
3170 strscpy(audio_info->display_name,
3171 edid_caps->display_name,
3172 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
3174 if (cea_revision >= 3) {
3175 audio_info->mode_count = edid_caps->audio_mode_count;
3177 for (i = 0; i < audio_info->mode_count; ++i) {
3178 audio_info->modes[i].format_code =
3179 (enum audio_format_code)
3180 (edid_caps->audio_modes[i].format_code);
3181 audio_info->modes[i].channel_count =
3182 edid_caps->audio_modes[i].channel_count;
3183 audio_info->modes[i].sample_rates.all =
3184 edid_caps->audio_modes[i].sample_rate;
3185 audio_info->modes[i].sample_size =
3186 edid_caps->audio_modes[i].sample_size;
3190 audio_info->flags.all = edid_caps->speaker_flags;
3192 /* TODO: We only check progressive mode; check interlaced mode too. */
3193 if (drm_connector->latency_present[0]) {
3194 audio_info->video_latency = drm_connector->video_latency[0];
3195 audio_info->audio_latency = drm_connector->audio_latency[0];
3198 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3203 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3204 struct drm_display_mode *dst_mode)
3206 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3207 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3208 dst_mode->crtc_clock = src_mode->crtc_clock;
3209 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3210 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
3211 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
3212 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3213 dst_mode->crtc_htotal = src_mode->crtc_htotal;
3214 dst_mode->crtc_hskew = src_mode->crtc_hskew;
3215 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3216 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3217 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3218 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3219 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3223 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3224 const struct drm_display_mode *native_mode,
3227 if (scale_enabled) {
3228 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3229 } else if (native_mode->clock == drm_mode->clock &&
3230 native_mode->htotal == drm_mode->htotal &&
3231 native_mode->vtotal == drm_mode->vtotal) {
3232 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3234 /* neither scaling nor an amdgpu-inserted mode; no need to patch */
3238 static struct dc_sink *
3239 create_fake_sink(struct amdgpu_dm_connector *aconnector)
3241 struct dc_sink_init_data sink_init_data = { 0 };
3242 struct dc_sink *sink = NULL;
3243 sink_init_data.link = aconnector->dc_link;
3244 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
3246 sink = dc_sink_create(&sink_init_data);
3248 DRM_ERROR("Failed to create sink!\n");
3251 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
3256 static void set_multisync_trigger_params(
3257 struct dc_stream_state *stream)
3259 if (stream->triggered_crtc_reset.enabled) {
3260 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
3261 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
3265 static void set_master_stream(struct dc_stream_state *stream_set[],
3268 int j, highest_rfr = 0, master_stream = 0;
3270 for (j = 0; j < stream_count; j++) {
3271 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
3272 int refresh_rate = 0;
3274 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
3275 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
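/*
 * pix_clk_100hz * 100 / (h_total * v_total) is the refresh rate in Hz,
 * e.g. 1485000 * 100 / (2200 * 1125) = 60 for a 1080p60 stream.
 */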
3276 if (refresh_rate > highest_rfr) {
3277 highest_rfr = refresh_rate;
3282 for (j = 0; j < stream_count; j++) {
3284 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
3288 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
3292 if (context->stream_count < 2)
3294 for (i = 0; i < context->stream_count ; i++) {
3295 if (!context->streams[i])
3298 * TODO: add a function to read AMD VSDB bits and set
3299 * crtc_sync_master.multi_sync_enabled flag
3300 * For now it's set to false
3302 set_multisync_trigger_params(context->streams[i]);
3304 set_master_stream(context->streams, context->stream_count);
3307 static struct dc_stream_state *
3308 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
3309 const struct drm_display_mode *drm_mode,
3310 const struct dm_connector_state *dm_state,
3311 const struct dc_stream_state *old_stream)
3313 struct drm_display_mode *preferred_mode = NULL;
3314 struct drm_connector *drm_connector;
3315 struct dc_stream_state *stream = NULL;
3316 struct drm_display_mode mode = *drm_mode;
3317 bool native_mode_found = false;
3318 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
3320 int preferred_refresh = 0;
3322 struct dc_sink *sink = NULL;
3323 if (aconnector == NULL) {
3324 DRM_ERROR("aconnector is NULL!\n");
3328 drm_connector = &aconnector->base;
3330 if (!aconnector->dc_sink) {
3331 sink = create_fake_sink(aconnector);
3335 sink = aconnector->dc_sink;
3336 dc_sink_retain(sink);
3339 stream = dc_create_stream_for_sink(sink);
3341 if (stream == NULL) {
3342 DRM_ERROR("Failed to create stream for sink!\n");
3346 stream->dm_stream_context = aconnector;
3348 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
3349 /* Search for preferred mode */
3350 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
3351 native_mode_found = true;
3355 if (!native_mode_found)
3356 preferred_mode = list_first_entry_or_null(
3357 &aconnector->base.modes,
3358 struct drm_display_mode,
3361 mode_refresh = drm_mode_vrefresh(&mode);
3363 if (preferred_mode == NULL) {
3365 * This may not be an error: the use case is when we have no
3366 * usermode calls to reset and set mode upon hotplug. In this
3367 * case, we call set mode ourselves to restore the previous mode,
3368 * and the mode list may not have been filled in yet.
3370 DRM_DEBUG_DRIVER("No preferred mode found\n");
3372 decide_crtc_timing_for_drm_display_mode(
3373 &mode, preferred_mode,
3374 dm_state ? (dm_state->scaling != RMX_OFF) : false);
3375 preferred_refresh = drm_mode_vrefresh(preferred_mode);
3379 drm_mode_set_crtcinfo(&mode, 0);
3382 * If scaling is enabled and refresh rate didn't change
3383 * we copy the vic and polarities of the old timings
3385 if (!scale || mode_refresh != preferred_refresh)
3386 fill_stream_properties_from_drm_display_mode(stream,
3387 &mode, &aconnector->base, NULL);
3389 fill_stream_properties_from_drm_display_mode(stream,
3390 &mode, &aconnector->base, old_stream);
3392 update_stream_scaling_settings(&mode, dm_state, stream);
3395 &stream->audio_info,
3399 update_stream_signal(stream, sink);
3402 dc_sink_release(sink);
3407 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
3409 drm_crtc_cleanup(crtc);
3413 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3414 struct drm_crtc_state *state)
3416 struct dm_crtc_state *cur = to_dm_crtc_state(state);
3418 /* TODO: Destroy dc_stream objects once the stream object is flattened. */
3420 dc_stream_release(cur->stream);
3423 __drm_atomic_helper_crtc_destroy_state(state);
3429 static void dm_crtc_reset_state(struct drm_crtc *crtc)
3431 struct dm_crtc_state *state;
3434 dm_crtc_destroy_state(crtc, crtc->state);
3436 state = kzalloc(sizeof(*state), GFP_KERNEL);
3437 if (WARN_ON(!state))
3440 crtc->state = &state->base;
3441 crtc->state->crtc = crtc;
3445 static struct drm_crtc_state *
3446 dm_crtc_duplicate_state(struct drm_crtc *crtc)
3448 struct dm_crtc_state *state, *cur;
3450 cur = to_dm_crtc_state(crtc->state);
3452 if (WARN_ON(!crtc->state))
3455 state = kzalloc(sizeof(*state), GFP_KERNEL);
3459 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3462 state->stream = cur->stream;
3463 dc_stream_retain(state->stream);
3466 state->active_planes = cur->active_planes;
3467 state->interrupts_enabled = cur->interrupts_enabled;
3468 state->vrr_params = cur->vrr_params;
3469 state->vrr_infopacket = cur->vrr_infopacket;
3470 state->abm_level = cur->abm_level;
3471 state->vrr_supported = cur->vrr_supported;
3472 state->freesync_config = cur->freesync_config;
3473 state->crc_enabled = cur->crc_enabled;
3475 /* TODO: Duplicate dc_stream once the stream object is flattened. */
3477 return &state->base;
3480 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
3482 enum dc_irq_source irq_source;
3483 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3484 struct amdgpu_device *adev = crtc->dev->dev_private;
3487 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
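/*
 * dc_irq_source values are laid out consecutively per OTG instance,
 * so adding otg_inst selects this CRTC's VUPDATE source.
 */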
3489 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3491 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
3492 acrtc->crtc_id, enable ? "en" : "dis", rc);
3496 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3498 enum dc_irq_source irq_source;
3499 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3500 struct amdgpu_device *adev = crtc->dev->dev_private;
3501 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3505 /* vblank irq on -> Only need vupdate irq in vrr mode */
3506 if (amdgpu_dm_vrr_active(acrtc_state))
3507 rc = dm_set_vupdate_irq(crtc, true);
3509 /* vblank irq off -> vupdate irq off */
3510 rc = dm_set_vupdate_irq(crtc, false);
3516 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3517 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3520 static int dm_enable_vblank(struct drm_crtc *crtc)
3522 return dm_set_vblank(crtc, true);
3525 static void dm_disable_vblank(struct drm_crtc *crtc)
3527 dm_set_vblank(crtc, false);
3530 /* Only the options currently available to the driver are implemented. */
3531 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
3532 .reset = dm_crtc_reset_state,
3533 .destroy = amdgpu_dm_crtc_destroy,
3534 .gamma_set = drm_atomic_helper_legacy_gamma_set,
3535 .set_config = drm_atomic_helper_set_config,
3536 .page_flip = drm_atomic_helper_page_flip,
3537 .atomic_duplicate_state = dm_crtc_duplicate_state,
3538 .atomic_destroy_state = dm_crtc_destroy_state,
3539 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3540 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
3541 .enable_vblank = dm_enable_vblank,
3542 .disable_vblank = dm_disable_vblank,
3545 static enum drm_connector_status
3546 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3549 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3553 * 1. This interface is NOT called in the context of an HPD irq.
3554 * 2. This interface *is* called in the context of a user-mode ioctl,
3555 * which makes it a bad place for *any* MST-related activity.
3558 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3559 !aconnector->fake_enable)
3560 connected = (aconnector->dc_sink != NULL);
3562 connected = (aconnector->base.force == DRM_FORCE_ON);
3564 return (connected ? connector_status_connected :
3565 connector_status_disconnected);
3568 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3569 struct drm_connector_state *connector_state,
3570 struct drm_property *property,
3573 struct drm_device *dev = connector->dev;
3574 struct amdgpu_device *adev = dev->dev_private;
3575 struct dm_connector_state *dm_old_state =
3576 to_dm_connector_state(connector->state);
3577 struct dm_connector_state *dm_new_state =
3578 to_dm_connector_state(connector_state);
3582 if (property == dev->mode_config.scaling_mode_property) {
3583 enum amdgpu_rmx_type rmx_type;
3586 case DRM_MODE_SCALE_CENTER:
3587 rmx_type = RMX_CENTER;
3589 case DRM_MODE_SCALE_ASPECT:
3590 rmx_type = RMX_ASPECT;
3592 case DRM_MODE_SCALE_FULLSCREEN:
3593 rmx_type = RMX_FULL;
3595 case DRM_MODE_SCALE_NONE:
3601 if (dm_old_state->scaling == rmx_type)
3604 dm_new_state->scaling = rmx_type;
3606 } else if (property == adev->mode_info.underscan_hborder_property) {
3607 dm_new_state->underscan_hborder = val;
3609 } else if (property == adev->mode_info.underscan_vborder_property) {
3610 dm_new_state->underscan_vborder = val;
3612 } else if (property == adev->mode_info.underscan_property) {
3613 dm_new_state->underscan_enable = val;
3615 } else if (property == adev->mode_info.max_bpc_property) {
3616 dm_new_state->max_bpc = val;
3618 } else if (property == adev->mode_info.abm_level_property) {
3619 dm_new_state->abm_level = val;
3626 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3627 const struct drm_connector_state *state,
3628 struct drm_property *property,
3631 struct drm_device *dev = connector->dev;
3632 struct amdgpu_device *adev = dev->dev_private;
3633 struct dm_connector_state *dm_state =
3634 to_dm_connector_state(state);
3637 if (property == dev->mode_config.scaling_mode_property) {
3638 switch (dm_state->scaling) {
3640 *val = DRM_MODE_SCALE_CENTER;
3643 *val = DRM_MODE_SCALE_ASPECT;
3646 *val = DRM_MODE_SCALE_FULLSCREEN;
3650 *val = DRM_MODE_SCALE_NONE;
3654 } else if (property == adev->mode_info.underscan_hborder_property) {
3655 *val = dm_state->underscan_hborder;
3657 } else if (property == adev->mode_info.underscan_vborder_property) {
3658 *val = dm_state->underscan_vborder;
3660 } else if (property == adev->mode_info.underscan_property) {
3661 *val = dm_state->underscan_enable;
3663 } else if (property == adev->mode_info.max_bpc_property) {
3664 *val = dm_state->max_bpc;
3666 } else if (property == adev->mode_info.abm_level_property) {
3667 *val = dm_state->abm_level;
3674 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
3676 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3677 const struct dc_link *link = aconnector->dc_link;
3678 struct amdgpu_device *adev = connector->dev->dev_private;
3679 struct amdgpu_display_manager *dm = &adev->dm;
3681 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3682 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3684 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3685 link->type != dc_connection_none &&
3686 dm->backlight_dev) {
3687 backlight_device_unregister(dm->backlight_dev);
3688 dm->backlight_dev = NULL;
3692 if (aconnector->dc_em_sink)
3693 dc_sink_release(aconnector->dc_em_sink);
3694 aconnector->dc_em_sink = NULL;
3695 if (aconnector->dc_sink)
3696 dc_sink_release(aconnector->dc_sink);
3697 aconnector->dc_sink = NULL;
3699 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
3700 drm_connector_unregister(connector);
3701 drm_connector_cleanup(connector);
3705 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
3707 struct dm_connector_state *state =
3708 to_dm_connector_state(connector->state);
3710 if (connector->state)
3711 __drm_atomic_helper_connector_destroy_state(connector->state);
3715 state = kzalloc(sizeof(*state), GFP_KERNEL);
3718 state->scaling = RMX_OFF;
3719 state->underscan_enable = false;
3720 state->underscan_hborder = 0;
3721 state->underscan_vborder = 0;
3724 __drm_atomic_helper_connector_reset(connector, &state->base);
3728 struct drm_connector_state *
3729 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
3731 struct dm_connector_state *state =
3732 to_dm_connector_state(connector->state);
3734 struct dm_connector_state *new_state =
3735 kmemdup(state, sizeof(*state), GFP_KERNEL);
3740 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
3742 new_state->freesync_capable = state->freesync_capable;
3743 new_state->abm_level = state->abm_level;
3744 new_state->scaling = state->scaling;
3745 new_state->underscan_enable = state->underscan_enable;
3746 new_state->underscan_hborder = state->underscan_hborder;
3747 new_state->underscan_vborder = state->underscan_vborder;
3748 new_state->max_bpc = state->max_bpc;
3750 return &new_state->base;
3753 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
3754 .reset = amdgpu_dm_connector_funcs_reset,
3755 .detect = amdgpu_dm_connector_detect,
3756 .fill_modes = drm_helper_probe_single_connector_modes,
3757 .destroy = amdgpu_dm_connector_destroy,
3758 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
3759 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
3760 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
3761 .atomic_get_property = amdgpu_dm_connector_atomic_get_property
3764 static int get_modes(struct drm_connector *connector)
3766 return amdgpu_dm_connector_get_modes(connector);
3769 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
3771 struct dc_sink_init_data init_params = {
3772 .link = aconnector->dc_link,
3773 .sink_signal = SIGNAL_TYPE_VIRTUAL
3777 if (!aconnector->base.edid_blob_ptr) {
3778 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
3779 aconnector->base.name);
3781 aconnector->base.force = DRM_FORCE_OFF;
3782 aconnector->base.override_edid = false;
3786 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
3788 aconnector->edid = edid;
3790 aconnector->dc_em_sink = dc_link_add_remote_sink(
3791 aconnector->dc_link,
3793 (edid->extensions + 1) * EDID_LENGTH,
3796 if (aconnector->base.force == DRM_FORCE_ON) {
3797 aconnector->dc_sink = aconnector->dc_link->local_sink ?
3798 aconnector->dc_link->local_sink :
3799 aconnector->dc_em_sink;
3800 dc_sink_retain(aconnector->dc_sink);
3804 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
3806 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
3809 * In case of a headless boot with force on for a DP managed connector,
3810 * those settings have to be != 0 to get an initial modeset.
3812 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3813 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
3814 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
3818 aconnector->base.override_edid = true;
3819 create_eml_sink(aconnector);
3822 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3823 struct drm_display_mode *mode)
3825 int result = MODE_ERROR;
3826 struct dc_sink *dc_sink;
3827 struct amdgpu_device *adev = connector->dev->dev_private;
3828 /* TODO: Unhardcode stream count */
3829 struct dc_stream_state *stream;
3830 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3831 enum dc_status dc_result = DC_OK;
3833 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
3834 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
3838 * Only run this the first time mode_valid is called to initialize
3841 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
3842 !aconnector->dc_em_sink)
3843 handle_edid_mgmt(aconnector);
3845 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
3847 if (dc_sink == NULL) {
3848 DRM_ERROR("dc_sink is NULL!\n");
3852 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
3853 if (stream == NULL) {
3854 DRM_ERROR("Failed to create stream for sink!\n");
3858 dc_result = dc_validate_stream(adev->dm.dc, stream);
3860 if (dc_result == DC_OK)
3863 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
3869 dc_stream_release(stream);
3872 /* TODO: error handling */
3876 static int fill_hdr_info_packet(const struct drm_connector_state *state,
3877 struct dc_info_packet *out)
3879 struct hdmi_drm_infoframe frame;
3880 unsigned char buf[30]; /* 26 + 4 */
3884 memset(out, 0, sizeof(*out));
3886 if (!state->hdr_output_metadata)
3889 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
3893 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
3897 /* Static metadata is a fixed 26 bytes + 4 byte header. */
3901 /* Prepare the infopacket for DC. */
3902 switch (state->connector->connector_type) {
3903 case DRM_MODE_CONNECTOR_HDMIA:
3904 out->hb0 = 0x87; /* type */
3905 out->hb1 = 0x01; /* version */
3906 out->hb2 = 0x1A; /* length */
3907 out->sb[0] = buf[3]; /* checksum */
3911 case DRM_MODE_CONNECTOR_DisplayPort:
3912 case DRM_MODE_CONNECTOR_eDP:
3913 out->hb0 = 0x00; /* sdp id, zero */
3914 out->hb1 = 0x87; /* type */
3915 out->hb2 = 0x1D; /* payload len - 1 */
3916 out->hb3 = (0x13 << 2); /* sdp version */
3917 out->sb[0] = 0x01; /* version */
3918 out->sb[1] = 0x1A; /* length */
3926 memcpy(&out->sb[i], &buf[4], 26);
3929 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
3930 sizeof(out->sb), false);
3936 is_hdr_metadata_different(const struct drm_connector_state *old_state,
3937 const struct drm_connector_state *new_state)
3939 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
3940 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
3942 if (old_blob != new_blob) {
3943 if (old_blob && new_blob &&
3944 old_blob->length == new_blob->length)
3945 return memcmp(old_blob->data, new_blob->data,
3955 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
3956 struct drm_atomic_state *state)
3958 struct drm_connector_state *new_con_state =
3959 drm_atomic_get_new_connector_state(state, conn);
3960 struct drm_connector_state *old_con_state =
3961 drm_atomic_get_old_connector_state(state, conn);
3962 struct drm_crtc *crtc = new_con_state->crtc;
3963 struct drm_crtc_state *new_crtc_state;
3969 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
3970 struct dc_info_packet hdr_infopacket;
3972 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
3976 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
3977 if (IS_ERR(new_crtc_state))
3978 return PTR_ERR(new_crtc_state);
3981 * DC considers the stream backends changed if the
3982 * static metadata changes. Forcing the modeset also
3983 * gives a simple way for userspace to switch from
3984 * 8bpc to 10bpc when setting the metadata to enter
3987 * Changing the static metadata after it's been
3988 * set is permissible, however. So only force a
3989 * modeset if we're entering or exiting HDR.
3991 new_crtc_state->mode_changed =
3992 !old_con_state->hdr_output_metadata ||
3993 !new_con_state->hdr_output_metadata;
3999 static const struct drm_connector_helper_funcs
4000 amdgpu_dm_connector_helper_funcs = {
4002 * If hotplugging a second bigger display in FB Con mode, bigger resolution
4003 * modes will be filtered by drm_mode_validate_size(), and those modes
4004 * are missing after the user starts lightdm. So we need to renew the
4005 * mode list in the get_modes callback, not just return the mode count.
4007 .get_modes = get_modes,
4008 .mode_valid = amdgpu_dm_connector_mode_valid,
4009 .atomic_check = amdgpu_dm_connector_atomic_check,
4012 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4016 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4018 struct drm_device *dev = new_crtc_state->crtc->dev;
4019 struct drm_plane *plane;
4021 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4022 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4029 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
4031 struct drm_atomic_state *state = new_crtc_state->state;
4032 struct drm_plane *plane;
4035 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4036 struct drm_plane_state *new_plane_state;
4038 /* Cursor planes are "fake". */
4039 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4042 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4044 if (!new_plane_state) {
4046 * The plane is enabled on the CRTC and hasn't changed
4047 * state. This means that it previously passed
4048 * validation and is therefore enabled.
4054 /* We need a framebuffer to be considered enabled. */
4055 num_active += (new_plane_state->fb != NULL);
4062 * Sets whether interrupts should be enabled on a specific CRTC.
4063 * We require that the stream be enabled and that there exist active
4064 * DC planes on the stream.
4067 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
4068 struct drm_crtc_state *new_crtc_state)
4070 struct dm_crtc_state *dm_new_crtc_state =
4071 to_dm_crtc_state(new_crtc_state);
4073 dm_new_crtc_state->active_planes = 0;
4074 dm_new_crtc_state->interrupts_enabled = false;
4076 if (!dm_new_crtc_state->stream)
4079 dm_new_crtc_state->active_planes =
4080 count_crtc_active_planes(new_crtc_state);
4082 dm_new_crtc_state->interrupts_enabled =
4083 dm_new_crtc_state->active_planes > 0;
4086 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
4087 struct drm_crtc_state *state)
4089 struct amdgpu_device *adev = crtc->dev->dev_private;
4090 struct dc *dc = adev->dm.dc;
4091 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
4095 * Update interrupt state for the CRTC. This needs to happen whenever
4096 * the CRTC has changed or whenever any of its planes have changed.
4097 * Atomic check satisfies both of these requirements since the CRTC
4098 * is added to the state by DRM during drm_atomic_helper_check_planes.
4100 dm_update_crtc_interrupt_state(crtc, state);
4102 if (unlikely(!dm_crtc_state->stream &&
4103 modeset_required(state, NULL, dm_crtc_state->stream))) {
4108 /* In some use cases, like reset, no stream is attached */
4109 if (!dm_crtc_state->stream)
4113 * We want at least one hardware plane enabled to use
4114 * the stream with a cursor enabled.
4116 if (state->enable && state->active &&
4117 does_crtc_have_active_cursor(state) &&
4118 dm_crtc_state->active_planes == 0)
4121 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
4127 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
4128 const struct drm_display_mode *mode,
4129 struct drm_display_mode *adjusted_mode)
4134 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
4135 .disable = dm_crtc_helper_disable,
4136 .atomic_check = dm_crtc_helper_atomic_check,
4137 .mode_fixup = dm_crtc_helper_mode_fixup
4140 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
4145 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
4146 struct drm_crtc_state *crtc_state,
4147 struct drm_connector_state *conn_state)
4152 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
4153 .disable = dm_encoder_helper_disable,
4154 .atomic_check = dm_encoder_helper_atomic_check
4157 static void dm_drm_plane_reset(struct drm_plane *plane)
4159 struct dm_plane_state *amdgpu_state = NULL;
4162 plane->funcs->atomic_destroy_state(plane, plane->state);
4164 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
4165 WARN_ON(amdgpu_state == NULL);
4168 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
4171 static struct drm_plane_state *
4172 dm_drm_plane_duplicate_state(struct drm_plane *plane)
4174 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
4176 old_dm_plane_state = to_dm_plane_state(plane->state);
4177 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
4178 if (!dm_plane_state)
4181 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
4183 if (old_dm_plane_state->dc_state) {
4184 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
4185 dc_plane_state_retain(dm_plane_state->dc_state);
4188 return &dm_plane_state->base;
4191 void dm_drm_plane_destroy_state(struct drm_plane *plane,
4192 struct drm_plane_state *state)
4194 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
4196 if (dm_plane_state->dc_state)
4197 dc_plane_state_release(dm_plane_state->dc_state);
4199 drm_atomic_helper_plane_destroy_state(plane, state);
4202 static const struct drm_plane_funcs dm_plane_funcs = {
4203 .update_plane = drm_atomic_helper_update_plane,
4204 .disable_plane = drm_atomic_helper_disable_plane,
4205 .destroy = drm_primary_helper_destroy,
4206 .reset = dm_drm_plane_reset,
4207 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
4208 .atomic_destroy_state = dm_drm_plane_destroy_state,
4211 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
4212 struct drm_plane_state *new_state)
4214 struct amdgpu_framebuffer *afb;
4215 struct drm_gem_object *obj;
4216 struct amdgpu_device *adev;
4217 struct amdgpu_bo *rbo;
4218 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
4219 uint64_t tiling_flags;
4223 dm_plane_state_old = to_dm_plane_state(plane->state);
4224 dm_plane_state_new = to_dm_plane_state(new_state);
4226 if (!new_state->fb) {
4227 DRM_DEBUG_DRIVER("No FB bound\n");
4231 afb = to_amdgpu_framebuffer(new_state->fb);
4232 obj = new_state->fb->obj[0];
4233 rbo = gem_to_amdgpu_bo(obj);
4234 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
4235 r = amdgpu_bo_reserve(rbo, false);
4236 if (unlikely(r != 0))
4239 if (plane->type != DRM_PLANE_TYPE_CURSOR)
4240 domain = amdgpu_display_supported_domains(adev);
4242 domain = AMDGPU_GEM_DOMAIN_VRAM;
4244 r = amdgpu_bo_pin(rbo, domain);
4245 if (unlikely(r != 0)) {
4246 if (r != -ERESTARTSYS)
4247 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
4248 amdgpu_bo_unreserve(rbo);
4252 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
4253 if (unlikely(r != 0)) {
4254 amdgpu_bo_unpin(rbo);
4255 amdgpu_bo_unreserve(rbo);
4256 DRM_ERROR("%p bind failed\n", rbo);
4260 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
4262 amdgpu_bo_unreserve(rbo);
4264 afb->address = amdgpu_bo_gpu_offset(rbo);
4268 if (dm_plane_state_new->dc_state &&
4269 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
4270 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
4272 fill_plane_buffer_attributes(
4273 adev, afb, plane_state->format, plane_state->rotation,
4274 tiling_flags, &plane_state->tiling_info,
4275 &plane_state->plane_size, &plane_state->dcc,
4276 &plane_state->address);
4282 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
4283 struct drm_plane_state *old_state)
4285 struct amdgpu_bo *rbo;
4291 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
4292 r = amdgpu_bo_reserve(rbo, false);
4294 DRM_ERROR("failed to reserve rbo before unpin\n");
4298 amdgpu_bo_unpin(rbo);
4299 amdgpu_bo_unreserve(rbo);
4300 amdgpu_bo_unref(&rbo);
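/*
 * Note: dm_plane_helper_prepare_fb() above pins the BO and binds it into
 * GART so the scanout address stays valid for the whole flip;
 * cleanup_fb() undoes that once the old framebuffer is no longer scanned
 * out, dropping the pin and the local BO reference.
 */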
4303 static int dm_plane_atomic_check(struct drm_plane *plane,
4304 struct drm_plane_state *state)
4306 struct amdgpu_device *adev = plane->dev->dev_private;
4307 struct dc *dc = adev->dm.dc;
4308 struct dm_plane_state *dm_plane_state;
4309 struct dc_scaling_info scaling_info;
4312 dm_plane_state = to_dm_plane_state(state);
4314 if (!dm_plane_state->dc_state)
4317 ret = fill_dc_scaling_info(state, &scaling_info);
4321 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
4327 static int dm_plane_atomic_async_check(struct drm_plane *plane,
4328 struct drm_plane_state *new_plane_state)
4330 struct drm_plane_state *old_plane_state =
4331 drm_atomic_get_old_plane_state(new_plane_state->state, plane);
4333 /* Only support async updates on cursor planes. */
4334 if (plane->type != DRM_PLANE_TYPE_CURSOR)
4338 * DRM calls prepare_fb and cleanup_fb on new_plane_state for
4339 * async commits so don't allow fb changes.
4341 if (old_plane_state->fb != new_plane_state->fb)
4347 static void dm_plane_atomic_async_update(struct drm_plane *plane,
4348 struct drm_plane_state *new_state)
4350 struct drm_plane_state *old_state =
4351 drm_atomic_get_old_plane_state(new_state->state, plane);
4353 if (plane->state->fb != new_state->fb)
4354 drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
4356 plane->state->src_x = new_state->src_x;
4357 plane->state->src_y = new_state->src_y;
4358 plane->state->src_w = new_state->src_w;
4359 plane->state->src_h = new_state->src_h;
4360 plane->state->crtc_x = new_state->crtc_x;
4361 plane->state->crtc_y = new_state->crtc_y;
4362 plane->state->crtc_w = new_state->crtc_w;
4363 plane->state->crtc_h = new_state->crtc_h;
4365 handle_cursor_update(plane, old_state);
4368 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
4369 .prepare_fb = dm_plane_helper_prepare_fb,
4370 .cleanup_fb = dm_plane_helper_cleanup_fb,
4371 .atomic_check = dm_plane_atomic_check,
4372 .atomic_async_check = dm_plane_atomic_async_check,
4373 .atomic_async_update = dm_plane_atomic_async_update
4377 * TODO: These are currently initialized to RGB formats only.
4378 * For future use cases we should either initialize them dynamically based on
4379 * plane capabilities, or initialize this array to all formats, so the internal
4380 * DRM check will succeed, and let DC implement the proper check
4382 static const uint32_t rgb_formats[] = {
4383 DRM_FORMAT_XRGB8888,
4384 DRM_FORMAT_ARGB8888,
4385 DRM_FORMAT_RGBA8888,
4386 DRM_FORMAT_XRGB2101010,
4387 DRM_FORMAT_XBGR2101010,
4388 DRM_FORMAT_ARGB2101010,
4389 DRM_FORMAT_ABGR2101010,
4390 DRM_FORMAT_XBGR8888,
4391 DRM_FORMAT_ABGR8888,
4395 static const uint32_t overlay_formats[] = {
4396 DRM_FORMAT_XRGB8888,
4397 DRM_FORMAT_ARGB8888,
4398 DRM_FORMAT_RGBA8888,
4399 DRM_FORMAT_XBGR8888,
4400 DRM_FORMAT_ABGR8888,
4404 static const u32 cursor_formats[] = {
4405 DRM_FORMAT_ARGB8888
4408 static int get_plane_formats(const struct drm_plane *plane,
4409 const struct dc_plane_cap *plane_cap,
4410 uint32_t *formats, int max_formats)
4412 int i, num_formats = 0;
4415 * TODO: Query support for each group of formats directly from
4416 * DC plane caps. This will require adding more formats to the
4420 switch (plane->type) {
4421 case DRM_PLANE_TYPE_PRIMARY:
4422 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
4423 if (num_formats >= max_formats)
4426 formats[num_formats++] = rgb_formats[i];
4429 if (plane_cap && plane_cap->pixel_format_support.nv12)
4430 formats[num_formats++] = DRM_FORMAT_NV12;
4433 case DRM_PLANE_TYPE_OVERLAY:
4434 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
4435 if (num_formats >= max_formats)
4438 formats[num_formats++] = overlay_formats[i];
4442 case DRM_PLANE_TYPE_CURSOR:
4443 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
4444 if (num_formats >= max_formats)
4447 formats[num_formats++] = cursor_formats[i];
4455 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
4456 struct drm_plane *plane,
4457 unsigned long possible_crtcs,
4458 const struct dc_plane_cap *plane_cap)
4460 uint32_t formats[32];
4464 num_formats = get_plane_formats(plane, plane_cap, formats,
4465 ARRAY_SIZE(formats));
4467 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
4468 &dm_plane_funcs, formats, num_formats,
4469 NULL, plane->type, NULL);
4473 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
4474 plane_cap && plane_cap->per_pixel_alpha) {
4475 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
4476 BIT(DRM_MODE_BLEND_PREMULTI);
4478 drm_plane_create_alpha_property(plane);
4479 drm_plane_create_blend_mode_property(plane, blend_caps);
4482 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
4483 plane_cap && plane_cap->pixel_format_support.nv12) {
4484 /* This only affects YUV formats. */
4485 drm_plane_create_color_properties(
4487 BIT(DRM_COLOR_YCBCR_BT601) |
4488 BIT(DRM_COLOR_YCBCR_BT709),
4489 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
4490 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
4491 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
4494 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
4496 /* Create (reset) the plane state */
4497 if (plane->funcs->reset)
4498 plane->funcs->reset(plane);
4503 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
4504 struct drm_plane *plane,
4505 uint32_t crtc_index)
4507 struct amdgpu_crtc *acrtc = NULL;
4508 struct drm_plane *cursor_plane;
4512 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
4516 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
4517 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
4519 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
4523 res = drm_crtc_init_with_planes(
4528 &amdgpu_dm_crtc_funcs, NULL);
4533 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
4535 /* Create (reset) the plane state */
4536 if (acrtc->base.funcs->reset)
4537 acrtc->base.funcs->reset(&acrtc->base);
4539 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
4540 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
4542 acrtc->crtc_id = crtc_index;
4543 acrtc->base.enabled = false;
4544 acrtc->otg_inst = -1;
4546 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
4547 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
4548 true, MAX_COLOR_LUT_ENTRIES);
4549 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
4555 kfree(cursor_plane);
4560 static int to_drm_connector_type(enum signal_type st)
4563 case SIGNAL_TYPE_HDMI_TYPE_A:
4564 return DRM_MODE_CONNECTOR_HDMIA;
4565 case SIGNAL_TYPE_EDP:
4566 return DRM_MODE_CONNECTOR_eDP;
4567 case SIGNAL_TYPE_LVDS:
4568 return DRM_MODE_CONNECTOR_LVDS;
4569 case SIGNAL_TYPE_RGB:
4570 return DRM_MODE_CONNECTOR_VGA;
4571 case SIGNAL_TYPE_DISPLAY_PORT:
4572 case SIGNAL_TYPE_DISPLAY_PORT_MST:
4573 return DRM_MODE_CONNECTOR_DisplayPort;
4574 case SIGNAL_TYPE_DVI_DUAL_LINK:
4575 case SIGNAL_TYPE_DVI_SINGLE_LINK:
4576 return DRM_MODE_CONNECTOR_DVID;
4577 case SIGNAL_TYPE_VIRTUAL:
4578 return DRM_MODE_CONNECTOR_VIRTUAL;
4581 return DRM_MODE_CONNECTOR_Unknown;
4585 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
4587 return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
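/*
 * DM creates exactly one encoder per connector (they are attached 1:1
 * in amdgpu_dm_connector_init() below), so the first encoder id is
 * always the right one here.
 */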
4590 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
4592 struct drm_encoder *encoder;
4593 struct amdgpu_encoder *amdgpu_encoder;
4595 encoder = amdgpu_dm_connector_to_encoder(connector);
4597 if (encoder == NULL)
4600 amdgpu_encoder = to_amdgpu_encoder(encoder);
4602 amdgpu_encoder->native_mode.clock = 0;
4604 if (!list_empty(&connector->probed_modes)) {
4605 struct drm_display_mode *preferred_mode = NULL;
4607 list_for_each_entry(preferred_mode,
4608 &connector->probed_modes,
4610 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
4611 amdgpu_encoder->native_mode = *preferred_mode;
4619 static struct drm_display_mode *
4620 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
4622 int hdisplay, int vdisplay)
4624 struct drm_device *dev = encoder->dev;
4625 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4626 struct drm_display_mode *mode = NULL;
4627 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4629 mode = drm_mode_duplicate(dev, native_mode);
4634 mode->hdisplay = hdisplay;
4635 mode->vdisplay = vdisplay;
4636 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
4637 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
4643 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
4644 struct drm_connector *connector)
4646 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4647 struct drm_display_mode *mode = NULL;
4648 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4649 struct amdgpu_dm_connector *amdgpu_dm_connector =
4650 to_amdgpu_dm_connector(connector);
4654 char name[DRM_DISPLAY_MODE_LEN];
4657 } common_modes[] = {
4658 { "640x480", 640, 480},
4659 { "800x600", 800, 600},
4660 { "1024x768", 1024, 768},
4661 { "1280x720", 1280, 720},
4662 { "1280x800", 1280, 800},
4663 {"1280x1024", 1280, 1024},
4664 { "1440x900", 1440, 900},
4665 {"1680x1050", 1680, 1050},
4666 {"1600x1200", 1600, 1200},
4667 {"1920x1080", 1920, 1080},
4668 {"1920x1200", 1920, 1200}
4671 n = ARRAY_SIZE(common_modes);
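/*
 * Only add common modes that fit within the native mode; the native
 * resolution itself is skipped, since it is already present in the
 * probed list as the preferred mode.
 */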
4673 for (i = 0; i < n; i++) {
4674 struct drm_display_mode *curmode = NULL;
4675 bool mode_existed = false;
4677 if (common_modes[i].w > native_mode->hdisplay ||
4678 common_modes[i].h > native_mode->vdisplay ||
4679 (common_modes[i].w == native_mode->hdisplay &&
4680 common_modes[i].h == native_mode->vdisplay))
4683 list_for_each_entry(curmode, &connector->probed_modes, head) {
4684 if (common_modes[i].w == curmode->hdisplay &&
4685 common_modes[i].h == curmode->vdisplay) {
4686 mode_existed = true;
4694 mode = amdgpu_dm_create_common_mode(encoder,
4695 common_modes[i].name, common_modes[i].w,
4697 drm_mode_probed_add(connector, mode);
4698 amdgpu_dm_connector->num_modes++;
4702 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
4705 struct amdgpu_dm_connector *amdgpu_dm_connector =
4706 to_amdgpu_dm_connector(connector);
4709 /* empty probed_modes */
4710 INIT_LIST_HEAD(&connector->probed_modes);
4711 amdgpu_dm_connector->num_modes =
4712 drm_add_edid_modes(connector, edid);
4714 amdgpu_dm_get_native_mode(connector);
4716 amdgpu_dm_connector->num_modes = 0;
4720 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
4722 struct amdgpu_dm_connector *amdgpu_dm_connector =
4723 to_amdgpu_dm_connector(connector);
4724 struct drm_encoder *encoder;
4725 struct edid *edid = amdgpu_dm_connector->edid;
4727 encoder = amdgpu_dm_connector_to_encoder(connector);
4729 if (!edid || !drm_edid_is_valid(edid)) {
4730 amdgpu_dm_connector->num_modes =
4731 drm_add_modes_noedid(connector, 640, 480);
4733 amdgpu_dm_connector_ddc_get_modes(connector, edid);
4734 amdgpu_dm_connector_add_common_modes(encoder, connector);
4736 amdgpu_dm_fbc_init(connector);
4738 return amdgpu_dm_connector->num_modes;
4741 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
4742 struct amdgpu_dm_connector *aconnector,
4744 struct dc_link *link,
4747 struct amdgpu_device *adev = dm->ddev->dev_private;
4749 aconnector->connector_id = link_index;
4750 aconnector->dc_link = link;
4751 aconnector->base.interlace_allowed = false;
4752 aconnector->base.doublescan_allowed = false;
4753 aconnector->base.stereo_allowed = false;
4754 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
4755 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
4756 mutex_init(&aconnector->hpd_lock);
4759 * Configure HPD hot-plug support: connector->polled defaults to 0,
4760 * which means HPD hot plug is not supported
4762 switch (connector_type) {
4763 case DRM_MODE_CONNECTOR_HDMIA:
4764 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4765 aconnector->base.ycbcr_420_allowed =
4766 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
4768 case DRM_MODE_CONNECTOR_DisplayPort:
4769 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4770 aconnector->base.ycbcr_420_allowed =
4771 link->link_enc->features.dp_ycbcr420_supported ? true : false;
4773 case DRM_MODE_CONNECTOR_DVID:
4774 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4780 drm_object_attach_property(&aconnector->base.base,
4781 dm->ddev->mode_config.scaling_mode_property,
4782 DRM_MODE_SCALE_NONE);
4784 drm_object_attach_property(&aconnector->base.base,
4785 adev->mode_info.underscan_property,
4787 drm_object_attach_property(&aconnector->base.base,
4788 adev->mode_info.underscan_hborder_property,
4790 drm_object_attach_property(&aconnector->base.base,
4791 adev->mode_info.underscan_vborder_property,
4793 drm_object_attach_property(&aconnector->base.base,
4794 adev->mode_info.max_bpc_property,
4797 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
4798 dc_is_dmcu_initialized(adev->dm.dc)) {
4799 drm_object_attach_property(&aconnector->base.base,
4800 adev->mode_info.abm_level_property, 0);
4803 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4804 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4805 connector_type == DRM_MODE_CONNECTOR_eDP) {
4806 drm_object_attach_property(
4807 &aconnector->base.base,
4808 dm->ddev->mode_config.hdr_output_metadata_property, 0);
4810 drm_connector_attach_vrr_capable_property(
4815 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
4816 struct i2c_msg *msgs, int num)
4818 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
4819 struct ddc_service *ddc_service = i2c->ddc_service;
4820 struct i2c_command cmd;
4824 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
4829 cmd.number_of_payloads = num;
4830 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
4833 for (i = 0; i < num; i++) {
4834 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
4835 cmd.payloads[i].address = msgs[i].addr;
4836 cmd.payloads[i].length = msgs[i].len;
4837 cmd.payloads[i].data = msgs[i].buf;
4840 if (dc_submit_i2c(
4841 ddc_service->ctx->dc,
4842 ddc_service->ddc_pin->hw_info.ddc_channel,
4843 &cmd))
4844 result = num;
4846 kfree(cmd.payloads);
4850 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
4852 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
4855 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
4856 .master_xfer = amdgpu_dm_i2c_xfer,
4857 .functionality = amdgpu_dm_i2c_func,
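/*
 * With this algorithm registered, ordinary i2c clients work against the
 * DC DDC line unmodified. Illustrative sketch (not in the driver): a raw
 * EDID block read over DDC would land in amdgpu_dm_i2c_xfer() above and
 * be forwarded to dc_submit_i2c():
 *
 *	u8 offset = 0, block[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = block },
 *	};
 *	i2c_transfer(&i2c->base, msgs, ARRAY_SIZE(msgs));
 */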
4860 static struct amdgpu_i2c_adapter *
4861 create_i2c(struct ddc_service *ddc_service,
4865 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
4866 struct amdgpu_i2c_adapter *i2c;
4868 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
4871 i2c->base.owner = THIS_MODULE;
4872 i2c->base.class = I2C_CLASS_DDC;
4873 i2c->base.dev.parent = &adev->pdev->dev;
4874 i2c->base.algo = &amdgpu_dm_i2c_algo;
4875 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
4876 i2c_set_adapdata(&i2c->base, i2c);
4877 i2c->ddc_service = ddc_service;
4878 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
4885 * Note: this function assumes that dc_link_detect() was called for the
4886 * dc_link which will be represented by this aconnector.
4888 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
4889 struct amdgpu_dm_connector *aconnector,
4890 uint32_t link_index,
4891 struct amdgpu_encoder *aencoder)
4895 struct dc *dc = dm->dc;
4896 struct dc_link *link = dc_get_link_at_index(dc, link_index);
4897 struct amdgpu_i2c_adapter *i2c;
4899 link->priv = aconnector;
4901 DRM_DEBUG_DRIVER("%s()\n", __func__);
4903 i2c = create_i2c(link->ddc, link->link_index, &res);
4905 DRM_ERROR("Failed to create i2c adapter data\n");
4909 aconnector->i2c = i2c;
4910 res = i2c_add_adapter(&i2c->base);
4913 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
4917 connector_type = to_drm_connector_type(link->connector_signal);
4919 res = drm_connector_init(
4922 &amdgpu_dm_connector_funcs,
4926 DRM_ERROR("connector_init failed\n");
4927 aconnector->connector_id = -1;
4931 drm_connector_helper_add(
4933 &amdgpu_dm_connector_helper_funcs);
4935 if (aconnector->base.funcs->reset)
4936 aconnector->base.funcs->reset(&aconnector->base);
4938 amdgpu_dm_connector_init_helper(
4945 drm_connector_attach_encoder(
4946 &aconnector->base, &aencoder->base);
4948 drm_connector_register(&aconnector->base);
4949 #if defined(CONFIG_DEBUG_FS)
4950 res = connector_debugfs_init(aconnector);
4952 DRM_ERROR("Failed to create debugfs for connector");
4955 aconnector->debugfs_dpcd_address = 0;
4956 aconnector->debugfs_dpcd_size = 0;
4959 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
4960 || connector_type == DRM_MODE_CONNECTOR_eDP)
4961 amdgpu_dm_initialize_dp_connector(dm, aconnector);
4966 aconnector->i2c = NULL;
4971 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
4973 switch (adev->mode_info.num_crtc) {
4990 static int amdgpu_dm_encoder_init(struct drm_device *dev,
4991 struct amdgpu_encoder *aencoder,
4992 uint32_t link_index)
4994 struct amdgpu_device *adev = dev->dev_private;
4996 int res = drm_encoder_init(dev,
4998 &amdgpu_dm_encoder_funcs,
4999 DRM_MODE_ENCODER_TMDS,
5002 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
5005 aencoder->encoder_id = link_index;
5007 aencoder->encoder_id = -1;
5009 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
5014 static void manage_dm_interrupts(struct amdgpu_device *adev,
5015 struct amdgpu_crtc *acrtc,
5019 * this is not the correct translation, but it will work as long as the
5020 * VBLANK constant is the same as PFLIP
5023 amdgpu_display_crtc_idx_to_irq_type(
5028 drm_crtc_vblank_on(&acrtc->base);
5031 &adev->pageflip_irq,
5037 &adev->pageflip_irq,
5039 drm_crtc_vblank_off(&acrtc->base);
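/*
 * Returns true when the scaling mode changed, when underscan was toggled
 * with non-zero borders, or when the border sizes themselves changed; in
 * other words, whenever the stream's destination rectangle has to be
 * recomputed.
 */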
5044 is_scaling_state_different(const struct dm_connector_state *dm_state,
5045 const struct dm_connector_state *old_dm_state)
5047 if (dm_state->scaling != old_dm_state->scaling)
5049 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
5050 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
5052 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
5053 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
5055 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
5056 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
5061 static void remove_stream(struct amdgpu_device *adev,
5062 struct amdgpu_crtc *acrtc,
5063 struct dc_stream_state *stream)
5065 /* this is the update mode case */
5067 acrtc->otg_inst = -1;
5068 acrtc->enabled = false;
5071 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
5072 struct dc_cursor_position *position)
5074 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5076 int xorigin = 0, yorigin = 0;
5078 if (!crtc || !plane->state->fb) {
5079 position->enable = false;
5085 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
5086 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
5087 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
5089 plane->state->crtc_w,
5090 plane->state->crtc_h);
5094 x = plane->state->crtc_x;
5095 y = plane->state->crtc_y;
5097 if (crtc->primary->state) {
5098 /* avivo cursors are offset into the total surface */
5099 x += crtc->primary->state->src_x >> 16;
5100 y += crtc->primary->state->src_y >> 16;
5104 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
5108 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
5111 position->enable = true;
5114 position->x_hotspot = xorigin;
5115 position->y_hotspot = yorigin;
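/*
 * Worked example: with a 64x64 cursor, a max cursor size of 128 and
 * plane->state->crtc_x = -16, crtc_y = 8 (no primary pan), x is
 * negative, yielding xorigin = min(16, 127) = 16; the hotspot then
 * shifts the cursor image so that the 16 off-screen columns are clipped
 * rather than wrapped around.
 */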
5120 static void handle_cursor_update(struct drm_plane *plane,
5121 struct drm_plane_state *old_plane_state)
5123 struct amdgpu_device *adev = plane->dev->dev_private;
5124 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
5125 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
5126 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
5127 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5128 uint64_t address = afb ? afb->address : 0;
5129 struct dc_cursor_position position;
5130 struct dc_cursor_attributes attributes;
5133 if (!plane->state->fb && !old_plane_state->fb)
5136 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
5138 amdgpu_crtc->crtc_id,
5139 plane->state->crtc_w,
5140 plane->state->crtc_h);
5142 ret = get_cursor_position(plane, crtc, &position);
5146 if (!position.enable) {
5147 /* turn off cursor */
5148 if (crtc_state && crtc_state->stream) {
5149 mutex_lock(&adev->dm.dc_lock);
5150 dc_stream_set_cursor_position(crtc_state->stream,
5152 mutex_unlock(&adev->dm.dc_lock);
5157 amdgpu_crtc->cursor_width = plane->state->crtc_w;
5158 amdgpu_crtc->cursor_height = plane->state->crtc_h;
5160 memset(&attributes, 0, sizeof(attributes));
5161 attributes.address.high_part = upper_32_bits(address);
5162 attributes.address.low_part = lower_32_bits(address);
5163 attributes.width = plane->state->crtc_w;
5164 attributes.height = plane->state->crtc_h;
5165 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
5166 attributes.rotation_angle = 0;
5167 attributes.attribute_flags.value = 0;
5169 attributes.pitch = attributes.width;
5171 if (crtc_state->stream) {
5172 mutex_lock(&adev->dm.dc_lock);
5173 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
5175 DRM_ERROR("DC failed to set cursor attributes\n");
5177 if (!dc_stream_set_cursor_position(crtc_state->stream,
5179 DRM_ERROR("DC failed to set cursor position\n");
5180 mutex_unlock(&adev->dm.dc_lock);
5184 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
5187 assert_spin_locked(&acrtc->base.dev->event_lock);
5188 WARN_ON(acrtc->event);
5190 acrtc->event = acrtc->base.state->event;
5192 /* Set the flip status */
5193 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
5195 /* Mark this event as consumed */
5196 acrtc->base.state->event = NULL;
5198 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
5202 static void update_freesync_state_on_stream(
5203 struct amdgpu_display_manager *dm,
5204 struct dm_crtc_state *new_crtc_state,
5205 struct dc_stream_state *new_stream,
5206 struct dc_plane_state *surface,
5207 u32 flip_timestamp_in_us)
5209 struct mod_vrr_params vrr_params;
5210 struct dc_info_packet vrr_infopacket = {0};
5211 struct amdgpu_device *adev = dm->adev;
5212 unsigned long flags;
5218 * TODO: Determine why min/max totals and vrefresh can be 0 here.
5219 * For now it's sufficient to just guard against these conditions.
5222 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
5225 spin_lock_irqsave(&adev->ddev->event_lock, flags);
5226 vrr_params = new_crtc_state->vrr_params;
5229 mod_freesync_handle_preflip(
5230 dm->freesync_module,
5233 flip_timestamp_in_us,
5236 if (adev->family < AMDGPU_FAMILY_AI &&
5237 amdgpu_dm_vrr_active(new_crtc_state)) {
5238 mod_freesync_handle_v_update(dm->freesync_module,
5239 new_stream, &vrr_params);
5243 mod_freesync_build_vrr_infopacket(
5244 dm->freesync_module,
5248 TRANSFER_FUNC_UNKNOWN,
5251 new_crtc_state->freesync_timing_changed |=
5252 (memcmp(&new_crtc_state->vrr_params.adjust,
5254 sizeof(vrr_params.adjust)) != 0);
5256 new_crtc_state->freesync_vrr_info_changed |=
5257 (memcmp(&new_crtc_state->vrr_infopacket,
5259 sizeof(vrr_infopacket)) != 0);
5261 new_crtc_state->vrr_params = vrr_params;
5262 new_crtc_state->vrr_infopacket = vrr_infopacket;
5264 new_stream->adjust = new_crtc_state->vrr_params.adjust;
5265 new_stream->vrr_infopacket = vrr_infopacket;
5267 if (new_crtc_state->freesync_vrr_info_changed)
5268 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
5269 new_crtc_state->base.crtc->base.id,
5270 (int)new_crtc_state->base.vrr_enabled,
5271 (int)vrr_params.state);
5273 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
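/*
 * update_freesync_state_on_stream() above runs on the flip path and
 * refreshes both the DRR timing adjustment and the VRR infopacket;
 * pre_update_freesync_state_on_stream() below only translates the
 * negotiated freesync_config into vrr_params, once per commit, before
 * CRTC interrupts are re-enabled.
 */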
5276 static void pre_update_freesync_state_on_stream(
5277 struct amdgpu_display_manager *dm,
5278 struct dm_crtc_state *new_crtc_state)
5280 struct dc_stream_state *new_stream = new_crtc_state->stream;
5281 struct mod_vrr_params vrr_params;
5282 struct mod_freesync_config config = new_crtc_state->freesync_config;
5283 struct amdgpu_device *adev = dm->adev;
5284 unsigned long flags;
5290 * TODO: Determine why min/max totals and vrefresh can be 0 here.
5291 * For now it's sufficient to just guard against these conditions.
5293 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
5296 spin_lock_irqsave(&adev->ddev->event_lock, flags);
5297 vrr_params = new_crtc_state->vrr_params;
5299 if (new_crtc_state->vrr_supported &&
5300 config.min_refresh_in_uhz &&
5301 config.max_refresh_in_uhz) {
5302 config.state = new_crtc_state->base.vrr_enabled ?
5303 VRR_STATE_ACTIVE_VARIABLE :
5306 config.state = VRR_STATE_UNSUPPORTED;
5309 mod_freesync_build_vrr_params(dm->freesync_module,
5311 &config, &vrr_params);
5313 new_crtc_state->freesync_timing_changed |=
5314 (memcmp(&new_crtc_state->vrr_params.adjust,
5316 sizeof(vrr_params.adjust)) != 0);
5318 new_crtc_state->vrr_params = vrr_params;
5319 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5322 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
5323 struct dm_crtc_state *new_state)
5325 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
5326 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
5328 if (!old_vrr_active && new_vrr_active) {
5329 /* Transition VRR inactive -> active:
5330 * While VRR is active, we must not disable vblank irq, as a
5331 * reenable after disable would compute bogus vblank/pflip
5332 * timestamps if the re-enable happens inside the display front porch.
5334 * We also need the vupdate irq for the actual core vblank handling at end of vblank.
5337 dm_set_vupdate_irq(new_state->base.crtc, true);
5338 drm_crtc_vblank_get(new_state->base.crtc);
5339 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
5340 __func__, new_state->base.crtc->base.id);
5341 } else if (old_vrr_active && !new_vrr_active) {
5342 /* Transition VRR active -> inactive:
5343 * Allow vblank irq disable again for fixed refresh rate.
5345 dm_set_vupdate_irq(new_state->base.crtc, false);
5346 drm_crtc_vblank_put(new_state->base.crtc);
5347 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
5348 __func__, new_state->base.crtc->base.id);
5352 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
5354 struct drm_plane *plane;
5355 struct drm_plane_state *old_plane_state, *new_plane_state;
5359 * TODO: Make this per-stream so we don't issue redundant updates for
5360 * commits with multiple streams.
5362 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
5364 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5365 handle_cursor_update(plane, old_plane_state);
5368 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
5369 struct dc_state *dc_state,
5370 struct drm_device *dev,
5371 struct amdgpu_display_manager *dm,
5372 struct drm_crtc *pcrtc,
5373 bool wait_for_vblank)
5376 uint64_t timestamp_ns;
5377 struct drm_plane *plane;
5378 struct drm_plane_state *old_plane_state, *new_plane_state;
5379 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
5380 struct drm_crtc_state *new_pcrtc_state =
5381 drm_atomic_get_new_crtc_state(state, pcrtc);
5382 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
5383 struct dm_crtc_state *dm_old_crtc_state =
5384 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
5385 int planes_count = 0, vpos, hpos;
5387 unsigned long flags;
5388 struct amdgpu_bo *abo;
5389 uint64_t tiling_flags;
5390 uint32_t target_vblank, last_flip_vblank;
5391 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
5392 bool pflip_present = false;
5394 struct dc_surface_update surface_updates[MAX_SURFACES];
5395 struct dc_plane_info plane_infos[MAX_SURFACES];
5396 struct dc_scaling_info scaling_infos[MAX_SURFACES];
5397 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
5398 struct dc_stream_update stream_update;
5401 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
5404 dm_error("Failed to allocate update bundle\n");
5409 * Disable the cursor first if we're disabling all the planes.
5410 * It'll remain on the screen after the planes are re-enabled if we don't.
5413 if (acrtc_state->active_planes == 0)
5414 amdgpu_dm_commit_cursors(state);
5416 /* update planes when needed */
5417 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
5418 struct drm_crtc *crtc = new_plane_state->crtc;
5419 struct drm_crtc_state *new_crtc_state;
5420 struct drm_framebuffer *fb = new_plane_state->fb;
5421 bool plane_needs_flip;
5422 struct dc_plane_state *dc_plane;
5423 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
5425 /* Cursor plane is handled after stream updates */
5426 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5429 if (!fb || !crtc || pcrtc != crtc)
5432 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
5433 if (!new_crtc_state->active)
5436 dc_plane = dm_new_plane_state->dc_state;
5438 bundle->surface_updates[planes_count].surface = dc_plane;
5439 if (new_pcrtc_state->color_mgmt_changed) {
5440 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
5441 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
5444 fill_dc_scaling_info(new_plane_state,
5445 &bundle->scaling_infos[planes_count]);
5447 bundle->surface_updates[planes_count].scaling_info =
5448 &bundle->scaling_infos[planes_count];
5450 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
5452 pflip_present = pflip_present || plane_needs_flip;
5454 if (!plane_needs_flip) {
5459 abo = gem_to_amdgpu_bo(fb->obj[0]);
5462 * Wait for all fences on this FB. Do limited wait to avoid
5463 * deadlock during GPU reset when this fence will not signal
5464 * but we hold reservation lock for the BO.
5466 r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true,
5468 msecs_to_jiffies(5000));
5469 if (unlikely(r <= 0))
5470 DRM_ERROR("Waiting for fences timed out or interrupted!");
5473 * TODO: This might fail and hence is better not used; wait
5474 * explicitly on fences instead,
5475 * and in general this should only be called for a
5476 * blocking commit, as per the framework helpers.
5478 r = amdgpu_bo_reserve(abo, true);
5479 if (unlikely(r != 0))
5480 DRM_ERROR("failed to reserve buffer before flip\n");
5482 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
5484 amdgpu_bo_unreserve(abo);
5486 fill_dc_plane_info_and_addr(
5487 dm->adev, new_plane_state, tiling_flags,
5488 &bundle->plane_infos[planes_count],
5489 &bundle->flip_addrs[planes_count].address);
5491 bundle->surface_updates[planes_count].plane_info =
5492 &bundle->plane_infos[planes_count];
5494 bundle->flip_addrs[planes_count].flip_immediate =
5495 (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
5497 timestamp_ns = ktime_get_ns();
5498 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
5499 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
5500 bundle->surface_updates[planes_count].surface = dc_plane;
5502 if (!bundle->surface_updates[planes_count].surface) {
5503 DRM_ERROR("No surface for CRTC: id=%d\n",
5504 acrtc_attach->crtc_id);
5508 if (plane == pcrtc->primary)
5509 update_freesync_state_on_stream(
5512 acrtc_state->stream,
5514 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
5516 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
5518 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
5519 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
5525 if (pflip_present) {
5527 /* Use old throttling in non-vrr fixed refresh rate mode
5528 * to keep flip scheduling based on target vblank counts
5529 * working in a backwards compatible way, e.g., for
5530 * clients using the GLX_OML_sync_control extension or
5531 * DRI3/Present extension with defined target_msc.
5533 last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
5536 /* For variable refresh rate mode only:
5537 * Get vblank of last completed flip to avoid > 1 vrr
5538 * flips per video frame by use of throttling, but allow
5539 * flip programming anywhere in the possibly large
5540 * variable vrr vblank interval for fine-grained flip
5541 * timing control and more opportunity to avoid stutter
5542 * on late submission of flips.
5544 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5545 last_flip_vblank = acrtc_attach->last_flip_vblank;
5546 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5549 target_vblank = last_flip_vblank + wait_for_vblank;
5552 * Wait until we're out of the vertical blank period before the one
5553 * targeted by the flip
5555 while ((acrtc_attach->enabled &&
5556 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
5557 0, &vpos, &hpos, NULL,
5558 NULL, &pcrtc->hwmode)
5559 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
5560 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
5561 (int)(target_vblank -
5562 amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
5563 usleep_range(1000, 1100);
5566 if (acrtc_attach->base.state->event) {
5567 drm_crtc_vblank_get(pcrtc);
5569 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5571 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
5572 prepare_flip_isr(acrtc_attach);
5574 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5577 if (acrtc_state->stream) {
5579 if (acrtc_state->freesync_timing_changed)
5580 bundle->stream_update.adjust =
5581 &acrtc_state->stream->adjust;
5583 if (acrtc_state->freesync_vrr_info_changed)
5584 bundle->stream_update.vrr_infopacket =
5585 &acrtc_state->stream->vrr_infopacket;
5589 /* Update the planes if changed or disable if we don't have any. */
5590 if (planes_count || acrtc_state->active_planes == 0) {
5591 if (new_pcrtc_state->mode_changed) {
5592 bundle->stream_update.src = acrtc_state->stream->src;
5593 bundle->stream_update.dst = acrtc_state->stream->dst;
5596 if (new_pcrtc_state->color_mgmt_changed)
5597 bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
5599 acrtc_state->stream->abm_level = acrtc_state->abm_level;
5600 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
5601 bundle->stream_update.abm_level = &acrtc_state->abm_level;
5603 mutex_lock(&dm->dc_lock);
5604 dc_commit_updates_for_stream(dm->dc,
5605 bundle->surface_updates,
5607 acrtc_state->stream,
5608 &bundle->stream_update,
5610 mutex_unlock(&dm->dc_lock);
5614 * Update cursor state *after* programming all the planes.
5615 * This avoids redundant programming in the case where we're going
5616 * to be disabling a single plane - those pipes are being disabled.
5618 if (acrtc_state->active_planes)
5619 amdgpu_dm_commit_cursors(state);
5626 * Enable interrupts on CRTCs that are newly active, undergone
5627 * a modeset, or have active planes again.
5629 * Done in two passes, based on the for_modeset flag:
5630 * Pass 1: For CRTCs going through modeset
5631 * Pass 2: For CRTCs going from 0 to n active planes
5633 * Interrupts can only be enabled after the planes are programmed,
5634 * so this requires a two-pass approach since we don't want to
5635 * just defer the interrupts until after commit planes every time.
5637 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
5638 struct drm_atomic_state *state,
5641 struct amdgpu_device *adev = dev->dev_private;
5642 struct drm_crtc *crtc;
5643 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5646 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
5647 new_crtc_state, i) {
5648 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5649 struct dm_crtc_state *dm_new_crtc_state =
5650 to_dm_crtc_state(new_crtc_state);
5651 struct dm_crtc_state *dm_old_crtc_state =
5652 to_dm_crtc_state(old_crtc_state);
5653 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
5656 run_pass = (for_modeset && modeset) ||
5657 (!for_modeset && !modeset &&
5658 !dm_old_crtc_state->interrupts_enabled);
5663 if (!dm_new_crtc_state->interrupts_enabled)
5666 manage_dm_interrupts(adev, acrtc, true);
5668 #ifdef CONFIG_DEBUG_FS
5669 /* The stream has changed so CRC capture needs to be re-enabled. */
5670 if (dm_new_crtc_state->crc_enabled) {
5671 dm_new_crtc_state->crc_enabled = false;
5672 amdgpu_dm_crtc_set_crc_source(crtc, "auto");
5679 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
5680 * @crtc_state: the DRM CRTC state
5681 * @stream_state: the DC stream state.
5683 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
5684 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
5686 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
5687 struct dc_stream_state *stream_state)
5689 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
5692 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
5693 struct drm_atomic_state *state,
5696 struct drm_crtc *crtc;
5697 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5698 struct amdgpu_device *adev = dev->dev_private;
5702 * We evade vblank and pflip interrupts on CRTCs that are undergoing
5703 * a modeset, being disabled, or have no active planes.
5705 * It's done in atomic commit rather than commit tail for now since
5706 * some of these interrupt handlers access the current CRTC state and
5707 * potentially the stream pointer itself.
5709 * Since the atomic state is swapped within atomic commit and not within
5710 * commit tail, this would lead to the new state (that hasn't been committed yet)
5711 * being accessed from within the handlers.
5713 * TODO: Fix this so we can do this in commit tail and not have to block
5716 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5717 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5718 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5719 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5721 if (dm_old_crtc_state->interrupts_enabled &&
5722 (!dm_new_crtc_state->interrupts_enabled ||
5723 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
5725 * Drop the extra vblank reference added by CRC
5726 * capture if applicable.
5728 if (dm_new_crtc_state->crc_enabled)
5729 drm_crtc_vblank_put(crtc);
5732 * Only keep CRC capture enabled if there's
5733 * still a stream for the CRTC.
5735 if (!dm_new_crtc_state->stream)
5736 dm_new_crtc_state->crc_enabled = false;
5738 manage_dm_interrupts(adev, acrtc, false);
5742 * Add a check here for SoCs that support a hardware cursor plane, to
5743 * unset legacy_cursor_update
5746 return drm_atomic_helper_commit(dev, state, nonblock);
5748 /*TODO Handle EINTR, reenable IRQ*/
5752 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
5753 * @state: The atomic state to commit
5755 * This will tell DC to commit the constructed DC state from atomic_check,
5756 * programming the hardware. Any failure here implies a hardware failure, since
5757 * atomic check should have filtered anything non-kosher.
5759 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
5761 struct drm_device *dev = state->dev;
5762 struct amdgpu_device *adev = dev->dev_private;
5763 struct amdgpu_display_manager *dm = &adev->dm;
5764 struct dm_atomic_state *dm_state;
5765 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
5767 struct drm_crtc *crtc;
5768 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5769 unsigned long flags;
5770 bool wait_for_vblank = true;
5771 struct drm_connector *connector;
5772 struct drm_connector_state *old_con_state, *new_con_state;
5773 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
5774 int crtc_disable_count = 0;
5776 drm_atomic_helper_update_legacy_modeset_state(dev, state);
5778 dm_state = dm_atomic_get_new_state(state);
5779 if (dm_state && dm_state->context) {
5780 dc_state = dm_state->context;
5782 /* No state changes, retain current state. */
5783 dc_state_temp = dc_create_state(dm->dc);
5784 ASSERT(dc_state_temp);
5785 dc_state = dc_state_temp;
5786 dc_resource_state_copy_construct_current(dm->dc, dc_state);
5789 /* update changed items */
5790 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5791 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5793 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5794 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5797 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5798 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
5799 "connectors_changed:%d\n",
5801 new_crtc_state->enable,
5802 new_crtc_state->active,
5803 new_crtc_state->planes_changed,
5804 new_crtc_state->mode_changed,
5805 new_crtc_state->active_changed,
5806 new_crtc_state->connectors_changed);
5808 /* Copy all transient state flags into dc state */
5809 if (dm_new_crtc_state->stream) {
5810 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
5811 dm_new_crtc_state->stream);
5814 /* handles headless hotplug case, updating new_state and
5815 * aconnector as needed
5818 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
5820 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
5822 if (!dm_new_crtc_state->stream) {
5824 * This could happen because of issues with
5825 * delivery of userspace notifications:
5826 * userspace tries to set a mode on a display
5827 * which is in fact disconnected.
5828 * dc_sink is NULL on the aconnector in this case.
5829 * We expect a mode reset to come soon.
5831 * This can also happen when an unplug occurs
5832 * during the resume sequence.
5834 * In this case, we want to pretend we still
5835 * have a sink to keep the pipe running so that
5836 * hw state is consistent with the sw state
5838 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5839 __func__, acrtc->base.base.id);
5843 if (dm_old_crtc_state->stream)
5844 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5846 pm_runtime_get_noresume(dev->dev);
5848 acrtc->enabled = true;
5849 acrtc->hw_mode = new_crtc_state->mode;
5850 crtc->hwmode = new_crtc_state->mode;
5851 } else if (modereset_required(new_crtc_state)) {
5852 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
5854 /* i.e. reset mode */
5855 if (dm_old_crtc_state->stream)
5856 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5858 } /* for_each_crtc_in_state() */
5861 dm_enable_per_frame_crtc_master_sync(dc_state);
5862 mutex_lock(&dm->dc_lock);
5863 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5864 mutex_unlock(&dm->dc_lock);
5867 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5868 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5870 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5872 if (dm_new_crtc_state->stream != NULL) {
5873 const struct dc_stream_status *status =
5874 dc_stream_get_status(dm_new_crtc_state->stream);
5877 status = dc_stream_get_status_from_state(dc_state,
5878 dm_new_crtc_state->stream);
5881 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
5883 acrtc->otg_inst = status->primary_otg_inst;
5887 /* Handle connector state changes */
5888 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5889 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5890 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5891 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5892 struct dc_surface_update dummy_updates[MAX_SURFACES];
5893 struct dc_stream_update stream_update;
5894 struct dc_info_packet hdr_packet;
5895 struct dc_stream_status *status = NULL;
5896 bool abm_changed, hdr_changed, scaling_changed;
5898 memset(&dummy_updates, 0, sizeof(dummy_updates));
5899 memset(&stream_update, 0, sizeof(stream_update));
5902 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
5903 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
5906 /* Skip any modesets/resets */
5907 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
5910 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5911 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5913 scaling_changed = is_scaling_state_different(dm_new_con_state,
5916 abm_changed = dm_new_crtc_state->abm_level !=
5917 dm_old_crtc_state->abm_level;
5920 is_hdr_metadata_different(old_con_state, new_con_state);
5922 if (!scaling_changed && !abm_changed && !hdr_changed)
5925 if (scaling_changed) {
5926 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
5927 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
5929 stream_update.src = dm_new_crtc_state->stream->src;
5930 stream_update.dst = dm_new_crtc_state->stream->dst;
5934 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
5936 stream_update.abm_level = &dm_new_crtc_state->abm_level;
5940 fill_hdr_info_packet(new_con_state, &hdr_packet);
5941 stream_update.hdr_static_metadata = &hdr_packet;
5944 status = dc_stream_get_status(dm_new_crtc_state->stream);
5946 WARN_ON(!status->plane_count);
5949 * TODO: DC refuses to perform stream updates without a dc_surface_update.
5950 * Here we create an empty update on each plane.
5951 * To fix this, DC should permit updating only stream properties.
5953 for (j = 0; j < status->plane_count; j++)
5954 dummy_updates[j].surface = status->plane_states[0];
5957 mutex_lock(&dm->dc_lock);
5958 dc_commit_updates_for_stream(dm->dc,
5960 status->plane_count,
5961 dm_new_crtc_state->stream,
5964 mutex_unlock(&dm->dc_lock);
5967 /* Count number of newly disabled CRTCs for dropping PM refs later. */
5968 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
5969 new_crtc_state, i) {
5970 if (old_crtc_state->active && !new_crtc_state->active)
5971 crtc_disable_count++;
5973 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5974 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5976 /* Update freesync active state. */
5977 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
5979 /* Handle vrr on->off / off->on transitions */
5980 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
5984 /* Enable interrupts for CRTCs going through a modeset. */
5985 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
5987 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
5988 if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
5989 wait_for_vblank = false;
5991 /* update planes when needed per crtc */
5992 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
5993 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5995 if (dm_new_crtc_state->stream)
5996 amdgpu_dm_commit_planes(state, dc_state, dev,
5997 dm, crtc, wait_for_vblank);
6000 /* Enable interrupts for CRTCs going from 0 to n active planes. */
6001 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
6004 * send the vblank event for all events not handled in the flip path, and
6005 * mark the event as consumed for drm_atomic_helper_commit_hw_done
6007 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6008 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
6010 if (new_crtc_state->event)
6011 drm_send_event_locked(dev, &new_crtc_state->event->base);
6013 new_crtc_state->event = NULL;
6015 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6017 /* Signal HW programming completion */
6018 drm_atomic_helper_commit_hw_done(state);
6020 if (wait_for_vblank)
6021 drm_atomic_helper_wait_for_flip_done(dev, state);
6023 drm_atomic_helper_cleanup_planes(dev, state);
6026 * Finally, drop a runtime PM reference for each newly disabled CRTC,
6027 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
6030 for (i = 0; i < crtc_disable_count; i++)
6031 pm_runtime_put_autosuspend(dev->dev);
6032 pm_runtime_mark_last_busy(dev->dev);
6035 dc_release_state(dc_state_temp);
6039 static int dm_force_atomic_commit(struct drm_connector *connector)
6042 struct drm_device *ddev = connector->dev;
6043 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
6044 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
6045 struct drm_plane *plane = disconnected_acrtc->base.primary;
6046 struct drm_connector_state *conn_state;
6047 struct drm_crtc_state *crtc_state;
6048 struct drm_plane_state *plane_state;
6053 state->acquire_ctx = ddev->mode_config.acquire_ctx;
6055 /* Construct an atomic state to restore previous display setting */
6058 * Attach connectors to drm_atomic_state
6060 conn_state = drm_atomic_get_connector_state(state, connector);
6062 ret = PTR_ERR_OR_ZERO(conn_state);
6066 /* Attach crtc to drm_atomic_state*/
6067 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
6069 ret = PTR_ERR_OR_ZERO(crtc_state);
6073 /* force a restore */
6074 crtc_state->mode_changed = true;
6076 /* Attach plane to drm_atomic_state */
6077 plane_state = drm_atomic_get_plane_state(state, plane);
6079 ret = PTR_ERR_OR_ZERO(plane_state);
6084 /* Call commit internally with the state we just constructed */
6085 ret = drm_atomic_commit(state);
6090 DRM_ERROR("Restoring old state failed with %i\n", ret);
6091 drm_atomic_state_put(state);
6097 * This function handles all cases when set mode does not come upon hotplug.
6098 * This includes when a display is unplugged then plugged back into the
6099 * same port and when running without usermode desktop manager support.
6101 void dm_restore_drm_connector_state(struct drm_device *dev,
6102 struct drm_connector *connector)
6104 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6105 struct amdgpu_crtc *disconnected_acrtc;
6106 struct dm_crtc_state *acrtc_state;
6108 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
6111 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
6112 if (!disconnected_acrtc)
6115 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
6116 if (!acrtc_state->stream)
6120 * If the previous sink is not released and different from the current,
6121 * we deduce we are in a state where we cannot rely on a usermode call
6122 * to turn on the display, so we do it here
6124 if (acrtc_state->stream->sink != aconnector->dc_sink)
6125 dm_force_atomic_commit(&aconnector->base);
6129 * Grabs all modesetting locks to serialize against any blocking commits, and
6130 * waits for completion of all non-blocking commits.
6132 static int do_aquire_global_lock(struct drm_device *dev,
6133 struct drm_atomic_state *state)
6135 struct drm_crtc *crtc;
6136 struct drm_crtc_commit *commit;
6140 * Adding all modeset locks to acquire_ctx will
6141 * ensure that when the framework releases it, the
6142 * extra locks we are locking here will get released too
6144 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
6148 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6149 spin_lock(&crtc->commit_lock);
6150 commit = list_first_entry_or_null(&crtc->commit_list,
6151 struct drm_crtc_commit, commit_entry);
6153 drm_crtc_commit_get(commit);
6154 spin_unlock(&crtc->commit_lock);
6160 * Make sure all pending HW programming has completed and page flips are done
6163 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
6166 ret = wait_for_completion_interruptible_timeout(
6167 &commit->flip_done, 10*HZ);
6170 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
6171 "timed out\n", crtc->base.id, crtc->name);
6173 drm_crtc_commit_put(commit);
6176 return ret < 0 ? ret : 0;
6179 static void get_freesync_config_for_crtc(
6180 struct dm_crtc_state *new_crtc_state,
6181 struct dm_connector_state *new_con_state)
6183 struct mod_freesync_config config = {0};
6184 struct amdgpu_dm_connector *aconnector =
6185 to_amdgpu_dm_connector(new_con_state->base.connector);
6186 struct drm_display_mode *mode = &new_crtc_state->base.mode;
6187 int vrefresh = drm_mode_vrefresh(mode);
6189 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
6190 vrefresh >= aconnector->min_vfreq &&
6191 vrefresh <= aconnector->max_vfreq;
6193 if (new_crtc_state->vrr_supported) {
6194 new_crtc_state->stream->ignore_msa_timing_param = true;
6195 config.state = new_crtc_state->base.vrr_enabled ?
6196 VRR_STATE_ACTIVE_VARIABLE :
6198 config.min_refresh_in_uhz =
6199 aconnector->min_vfreq * 1000000;
6200 config.max_refresh_in_uhz =
6201 aconnector->max_vfreq * 1000000;
6202 config.vsif_supported = true;
6206 new_crtc_state->freesync_config = config;
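/*
 * Example: a panel advertising a 40-60 Hz FreeSync range gives
 * min_refresh_in_uhz = 40,000,000 and max_refresh_in_uhz = 60,000,000;
 * a 75 Hz mode on the same connector fails the vrefresh bounds check
 * above, so vrr_supported stays false for that CRTC state.
 */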
6209 static void reset_freesync_config_for_crtc(
6210 struct dm_crtc_state *new_crtc_state)
6212 new_crtc_state->vrr_supported = false;
6214 memset(&new_crtc_state->vrr_params, 0,
6215 sizeof(new_crtc_state->vrr_params));
6216 memset(&new_crtc_state->vrr_infopacket, 0,
6217 sizeof(new_crtc_state->vrr_infopacket));
6220 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
6221 struct drm_atomic_state *state,
6222 struct drm_crtc *crtc,
6223 struct drm_crtc_state *old_crtc_state,
6224 struct drm_crtc_state *new_crtc_state,
6226 bool *lock_and_validation_needed)
6228 struct dm_atomic_state *dm_state = NULL;
6229 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6230 struct dc_stream_state *new_stream;
6234 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
6235 * update changed items
6237 struct amdgpu_crtc *acrtc = NULL;
6238 struct amdgpu_dm_connector *aconnector = NULL;
6239 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
6240 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
6244 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6245 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6246 acrtc = to_amdgpu_crtc(crtc);
6247 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
6249 /* TODO This hack should go away */
6250 if (aconnector && enable) {
6251 /* Make sure fake sink is created in plug-in scenario */
6252 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
6254 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
6257 if (IS_ERR(drm_new_conn_state)) {
6258 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
6262 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
6263 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
6265 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6268 new_stream = create_stream_for_sink(aconnector,
6269 &new_crtc_state->mode,
6271 dm_old_crtc_state->stream);
6274 * we can have no stream on ACTION_SET if a display
6275 * was disconnected during S3, in this case it is not an
6276 * error, the OS will be updated after detection, and
6277 * will do the right thing on next atomic commit
6281 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
6282 __func__, acrtc->base.base.id);
6287 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
6289 ret = fill_hdr_info_packet(drm_new_conn_state,
6290 &new_stream->hdr_static_metadata);
6294 if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
6295 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
6296 new_crtc_state->mode_changed = false;
6297 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
6298 new_crtc_state->mode_changed);
6302 /* mode_changed flag may get updated above, need to check again */
6303 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6307 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6308 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
6309 "connectors_changed:%d\n",
6311 new_crtc_state->enable,
6312 new_crtc_state->active,
6313 new_crtc_state->planes_changed,
6314 new_crtc_state->mode_changed,
6315 new_crtc_state->active_changed,
6316 new_crtc_state->connectors_changed);
6318 /* Remove stream for any changed/disabled CRTC */
6321 if (!dm_old_crtc_state->stream)
6324 ret = dm_atomic_get_state(state, &dm_state);
6328 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
6331 /* i.e. reset mode */
6332 if (dc_remove_stream_from_ctx(
6335 dm_old_crtc_state->stream) != DC_OK) {
6340 dc_stream_release(dm_old_crtc_state->stream);
6341 dm_new_crtc_state->stream = NULL;
6343 reset_freesync_config_for_crtc(dm_new_crtc_state);
6345 *lock_and_validation_needed = true;
6347 } else {/* Add stream for any updated/enabled CRTC */
6349 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
6350 * added MST connectors are not found in the existing crtc_state in chained mode.
6351 * TODO: need to dig out the root cause of that
6353 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
6356 if (modereset_required(new_crtc_state))
6359 if (modeset_required(new_crtc_state, new_stream,
6360 dm_old_crtc_state->stream)) {
6362 WARN_ON(dm_new_crtc_state->stream);
6364 ret = dm_atomic_get_state(state, &dm_state);
6368 dm_new_crtc_state->stream = new_stream;
6370 dc_stream_retain(new_stream);
6372 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
6375 if (dc_add_stream_to_ctx(
6378 dm_new_crtc_state->stream) != DC_OK) {
6383 *lock_and_validation_needed = true;
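	/*
	 * Fast (non-modeset) updates fall through to here, as do modesets
	 * once the stream has been added to or removed from the DC context.
	 */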
skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
		if (ret)
			goto fail;
		amdgpu_dm_set_ctm(dm_new_crtc_state);
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
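/*
 * Returns true if the given plane must be removed from the DC context and
 * re-added during atomic check: any modeset, CRTC (re)assignment, or
 * z-order/format change on the owning CRTC forces a plane reset with the
 * current DC architecture.
 */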
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}
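/*
 * Add or remove the DC plane state backing a DRM plane: on the disable pass
 * (enable == false) changed/removed planes are taken out of the DC context,
 * and on the enable pass they are recreated and re-added in their new
 * configuration.
 */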
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO Implement atomic check for cursor plane */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);
	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context))
			return -EINVAL;

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;
	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}
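/*
 * Ask DC how invasive the surface updates in this atomic state are, so the
 * caller can decide whether the commit needs the global lock and a full
 * context validation.
 */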
static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;

	struct dc_surface_update *updates;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
	if (!updates) {
		DRM_ERROR("Failed to allocate plane updates\n");
		/* Set type to FULL to avoid crashing in DC */
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}
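	/*
	 * Walk every CRTC and plane in the state, build a dc_surface_update
	 * list per stream, and let DC classify how heavy the update is.
	 */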
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dc_scaling_info scaling_info;
		struct dc_stream_update stream_update;

		memset(&stream_update, 0, sizeof(stream_update));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			new_plane_crtc = new_plane_state->crtc;
			old_plane_crtc = old_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			updates[num_plane].surface = new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				stream_update.dst = new_dm_crtc_state->stream->dst;
				stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				updates[num_plane].gamma =
						new_dm_plane_state->dc_state->gamma_correction;
				updates[num_plane].in_transfer_func =
						new_dm_plane_state->dc_state->in_transfer_func;
				stream_update.gamut_remap =
						&new_dm_crtc_state->stream->gamut_remap_matrix;
				stream_update.out_transfer_func =
						new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   &scaling_info);
			if (ret)
				goto cleanup;

			updates[num_plane].scaling_info = &scaling_info;

			num_plane++;
		}

		if (num_plane == 0)
			continue;
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);

		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
								  &stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(updates);

	*out_type = update_type;
	return ret;
}
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another CRTC, acquiring the global lock guarantees that
 * any such full-update commit will wait for completion of any outstanding
 * flips using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;

	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset
	 * or plane update which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;
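	/*
	 * The check proceeds in passes: run the DRM core checks, pull all
	 * affected connectors and planes into the state, remove changed
	 * planes and streams from the DC context, re-add them in their new
	 * configuration, and finally ask DC to validate the resulting state.
	 */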
	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}
	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);
			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}
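	/*
	 * The four passes below first take changed planes and streams out
	 * of the DC context (enable == false), then re-add them in their
	 * new configuration (enable == true).
	 */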
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}
	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}
	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}
	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}
	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;
	/*
	 * Check scaling and underscan changes.
	 * TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}
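	/*
	 * Fold DC's verdict on the accumulated surface updates into the
	 * overall update type for this commit.
	 */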
	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to set
	 * the global lock. Leaving it in to check if we broke any corner cases:
	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update = !drm_atomic_helper_async_check(dev, state);
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;
fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
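/*
 * Query the sink's DPCD to see whether it can render the incoming stream
 * without MSA timing parameters; the result decides below whether the EDID
 * must be scanned for a FreeSync range descriptor.
 */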
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data)))
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;

	return capable;
}
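/**
 * amdgpu_dm_update_freesync_caps - Update FreeSync capabilities
 *
 * @connector: Connector to query.
 * @edid: EDID from monitor
 *
 * Parse the monitor range descriptor out of the EDID, if present, and cache
 * the supported vertical refresh range on the connector so that variable
 * refresh rate can be enabled later.
 */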
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}
	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-zero, restrict FreeSync to DP and eDP sinks
	 * that can ignore MSA timing parameters.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
	    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}

	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;

			/* Check if monitor has continuous frequency mode */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}
update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}