/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS
#include "dm_services_types.h"

#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "amdgpu_display.h"
#include "amdgpu_ucode.h"

#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#endif

#include "soc15_common.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
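
/*
 * Layout sketch (inferred from the size math in dm_dmub_sw_init() and
 * dm_dmub_hw_init() below, not from PSP documentation): when the DMCUB
 * firmware is packaged for PSP loading, the instruction-constant region
 * is wrapped as
 *
 *   [ PSP header (0x100) | inst_const payload | PSP footer (0x100) ]
 *
 * so both constants are subtracted from hdr->inst_const_bytes before the
 * payload is copied for back-door loading.
 */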
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
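
/*
 * A rough example of that flow, using only entry points declared in this
 * file: an atomic commit from DRM enters amdgpu_dm_atomic_commit(), is
 * validated by amdgpu_dm_atomic_check(), and is finally programmed into
 * DC (and therefore the hardware) by amdgpu_dm_atomic_commit_tail().
 */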
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
/**
 * dm_vblank_get_counter
 *
 * Get counter for number of vertical blanks
 *
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * Returns:
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
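
/*
 * Example of the reg-format packing done in dm_crtc_get_scanoutpos()
 * (values are hypothetical): with h_position = 0x0050 and v_position =
 * 0x0010, *position becomes 0x00500010; with v_blank_start = 0x0438 and
 * v_blank_end = 0x0465, *vbl becomes 0x04650438.
 */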
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
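
	/*
	 * Illustrative note (not from the original source): a later flip
	 * aimed at this CRTC is held back while the counter still reads
	 * last_flip_vblank, so at most one flip completes per vblank when
	 * vsync + VRR is enabled, per the comment above.
	 */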
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
/* Prototypes of private functions */
static int dm_early_init(void* handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
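
/*
 * Worked example for the FBC sizing above (hypothetical timings): a
 * 3840x2160 mode with htotal = 4400 and vtotal = 2250 gives max_size =
 * 9,900,000 pixels, so amdgpu_dm_fbc_init() allocates max_size * 4 =
 * 39,600,000 bytes (~37.8 MiB) of GTT for the compressor buffer.
 */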
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
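
	/*
	 * Worked example with a hypothetical header value: if
	 * hdr->inst_const_bytes = 0x20200, the back-door copy below moves
	 * 0x20200 - 0x100 - 0x100 = 0x20000 bytes, i.e. the payload minus
	 * the PSP header and footer that wrap it.
	 */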
	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];
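
	/*
	 * For reference, the windows handed to the DMUB service above (as
	 * used in this file): window 0 holds inst_const, window 2 the BSS
	 * data, window 3 a copy of the VBIOS, and windows 4-6 are the
	 * mailbox, trace buffer and firmware state, which start zeroed.
	 */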
	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;

error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch(adev->asic_type) {
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
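
	/*
	 * Worked example with hypothetical sizes: if ucode_size_bytes =
	 * 0x9000 and intv_size_bytes = 0x1000, the ERAM entry accounts for
	 * ALIGN(0x8000, PAGE_SIZE) bytes and the INTV entry for
	 * ALIGN(0x1000, PAGE_SIZE) bytes, so the single DMCU image is split
	 * across two PSP ucode slots.
	 */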
	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
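
	/*
	 * Example values for the linear LUT built above: i = 0 maps to 0x0,
	 * i = 7 maps to 0xFFFF * 7 / 15 = 0x7777, and i = 15 maps to 0xFFFF,
	 * i.e. 16 evenly spaced points from 0 to full scale.
	 */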
	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 * *
	 */
	switch(adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (adev->in_gpu_reset) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} * bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (adev->in_gpu_reset) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
				dc_state->stream_status->plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * Calculating this expression directly may need floating-point
	 * precision; to avoid this complexity level, we take advantage that
	 * CV is divided by a constant. From Euclid's division algorithm, we
	 * know that CV can be written as: CV = 32*q + r. Next, we replace CV
	 * in the Luminance expression and get 50*(2**q)*(2**(r/32)), hence we
	 * just need to pre-compute the value of r/32. For pre-computing the
	 * values we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];
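
	/*
	 * Worked example (hypothetical EDID value): max_cll = 65 gives
	 * q = 2 and r = 1, so max = (1 << 2) * 51 = 204 nits, matching
	 * 50*2**(65/32) = ~204 from the formula above.
	 */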
	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
2087 void amdgpu_dm_update_connector_after_detect(
2088 struct amdgpu_dm_connector *aconnector)
2090 struct drm_connector *connector = &aconnector->base;
2091 struct drm_device *dev = connector->dev;
2092 struct dc_sink *sink;
2094 /* MST handled by drm_mst framework */
2095 if (aconnector->mst_mgr.mst_state == true)
2099 sink = aconnector->dc_link->local_sink;
2101 dc_sink_retain(sink);
2104 * Edid mgmt connector gets first update only in mode_valid hook and then
2105 * the connector sink is set to either fake or physical sink depends on link status.
2106 * Skip if already done during boot.
2108 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2109 && aconnector->dc_em_sink) {
2112 * For S3 resume with headless use eml_sink to fake stream
2113 * because on resume connector->sink is set to NULL
2115 mutex_lock(&dev->mode_config.mutex);
2118 if (aconnector->dc_sink) {
2119 amdgpu_dm_update_freesync_caps(connector, NULL);
2121 * retain and release below are used to
2122 * bump up refcount for sink because the link doesn't point
2123 * to it anymore after disconnect, so on next crtc to connector
2124 * reshuffle by UMD we will get into unwanted dc_sink release
2126 dc_sink_release(aconnector->dc_sink);
2128 aconnector->dc_sink = sink;
2129 dc_sink_retain(aconnector->dc_sink);
2130 amdgpu_dm_update_freesync_caps(connector,
2133 amdgpu_dm_update_freesync_caps(connector, NULL);
2134 if (!aconnector->dc_sink) {
2135 aconnector->dc_sink = aconnector->dc_em_sink;
2136 dc_sink_retain(aconnector->dc_sink);
2140 mutex_unlock(&dev->mode_config.mutex);
2143 dc_sink_release(sink);
2148 * TODO: temporary guard until a proper fix is found.
2149 * If this sink is an MST sink, we should not do anything.
2151 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2152 dc_sink_release(sink);
2156 if (aconnector->dc_sink == sink) {
2158 * We got a DP short pulse (Link Loss, DP CTS, etc.).
2161 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2162 aconnector->connector_id);
2164 dc_sink_release(sink);
2168 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2169 aconnector->connector_id, aconnector->dc_sink, sink);
2171 mutex_lock(&dev->mode_config.mutex);
2174 * 1. Update status of the drm connector
2175 * 2. Send an event and let userspace tell us what to do
2179 * TODO: check if we still need the S3 mode update workaround.
2180 * If yes, put it here.
2182 if (aconnector->dc_sink)
2183 amdgpu_dm_update_freesync_caps(connector, NULL);
2185 aconnector->dc_sink = sink;
2186 dc_sink_retain(aconnector->dc_sink);
2187 if (sink->dc_edid.length == 0) {
2188 aconnector->edid = NULL;
2189 if (aconnector->dc_link->aux_mode) {
2190 drm_dp_cec_unset_edid(
2191 &aconnector->dm_dp_aux.aux);
2195 (struct edid *)sink->dc_edid.raw_edid;
2197 drm_connector_update_edid_property(connector,
2200 if (aconnector->dc_link->aux_mode)
2201 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2205 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2206 update_connector_ext_caps(aconnector);
2208 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2209 amdgpu_dm_update_freesync_caps(connector, NULL);
2210 drm_connector_update_edid_property(connector, NULL);
2211 aconnector->num_modes = 0;
2212 dc_sink_release(aconnector->dc_sink);
2213 aconnector->dc_sink = NULL;
2214 aconnector->edid = NULL;
2215 #ifdef CONFIG_DRM_AMD_DC_HDCP
2216 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2217 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2218 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2222 mutex_unlock(&dev->mode_config.mutex);
2225 dc_sink_release(sink);
2228 static void handle_hpd_irq(void *param)
2230 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2231 struct drm_connector *connector = &aconnector->base;
2232 struct drm_device *dev = connector->dev;
2233 enum dc_connection_type new_connection_type = dc_connection_none;
2234 #ifdef CONFIG_DRM_AMD_DC_HDCP
2235 struct amdgpu_device *adev = dev->dev_private;
2239 * In case of failure or MST, there is no need to update the connector
2240 * status or notify the OS, since (in the MST case) MST handles this in its own context.
2242 mutex_lock(&aconnector->hpd_lock);
2244 #ifdef CONFIG_DRM_AMD_DC_HDCP
2245 if (adev->dm.hdcp_workqueue)
2246 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2248 if (aconnector->fake_enable)
2249 aconnector->fake_enable = false;
2251 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2252 DRM_ERROR("KMS: Failed to detect connector\n");
2254 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2255 emulated_link_detect(aconnector->dc_link);
2258 drm_modeset_lock_all(dev);
2259 dm_restore_drm_connector_state(dev, connector);
2260 drm_modeset_unlock_all(dev);
2262 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2263 drm_kms_helper_hotplug_event(dev);
2265 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2266 amdgpu_dm_update_connector_after_detect(aconnector);
2269 drm_modeset_lock_all(dev);
2270 dm_restore_drm_connector_state(dev, connector);
2271 drm_modeset_unlock_all(dev);
2273 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2274 drm_kms_helper_hotplug_event(dev);
2276 mutex_unlock(&aconnector->hpd_lock);
2280 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2282 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2284 bool new_irq_handled = false;
2286 int dpcd_bytes_to_read;
2288 const int max_process_count = 30;
2289 int process_count = 0;
2291 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2293 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2294 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2295 /* DPCD 0x200 - 0x201 for downstream IRQ */
2296 dpcd_addr = DP_SINK_COUNT;
2298 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2299 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2300 dpcd_addr = DP_SINK_COUNT_ESI;
2303 dret = drm_dp_dpcd_read(
2304 &aconnector->dm_dp_aux.aux,
2307 dpcd_bytes_to_read);
2309 while (dret == dpcd_bytes_to_read &&
2310 process_count < max_process_count) {
2316 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2317 /* handle HPD short pulse irq */
2318 if (aconnector->mst_mgr.mst_state)
2320 &aconnector->mst_mgr,
2324 if (new_irq_handled) {
2325 /* ACK at DPCD to notify downstream */
2326 const int ack_dpcd_bytes_to_write =
2327 dpcd_bytes_to_read - 1;
2329 for (retry = 0; retry < 3; retry++) {
2332 wret = drm_dp_dpcd_write(
2333 &aconnector->dm_dp_aux.aux,
2336 ack_dpcd_bytes_to_write);
2337 if (wret == ack_dpcd_bytes_to_write)
2341 /* check if there is new irq to be handled */
2342 dret = drm_dp_dpcd_read(
2343 &aconnector->dm_dp_aux.aux,
2346 dpcd_bytes_to_read);
2348 new_irq_handled = false;
2354 if (process_count == max_process_count)
2355 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2358 static void handle_hpd_rx_irq(void *param)
2360 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2361 struct drm_connector *connector = &aconnector->base;
2362 struct drm_device *dev = connector->dev;
2363 struct dc_link *dc_link = aconnector->dc_link;
2364 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2365 enum dc_connection_type new_connection_type = dc_connection_none;
2366 #ifdef CONFIG_DRM_AMD_DC_HDCP
2367 union hpd_irq_data hpd_irq_data;
2368 struct amdgpu_device *adev = dev->dev_private;
2370 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2374 * TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
2375 * conflicts; once an i2c helper is implemented, this mutex should be retired.
2378 if (dc_link->type != dc_connection_mst_branch)
2379 mutex_lock(&aconnector->hpd_lock);
2382 #ifdef CONFIG_DRM_AMD_DC_HDCP
2383 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2385 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2387 !is_mst_root_connector) {
2388 /* Downstream Port status changed. */
2389 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2390 DRM_ERROR("KMS: Failed to detect connector\n");
2392 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2393 emulated_link_detect(dc_link);
2395 if (aconnector->fake_enable)
2396 aconnector->fake_enable = false;
2398 amdgpu_dm_update_connector_after_detect(aconnector);
2401 drm_modeset_lock_all(dev);
2402 dm_restore_drm_connector_state(dev, connector);
2403 drm_modeset_unlock_all(dev);
2405 drm_kms_helper_hotplug_event(dev);
2406 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2408 if (aconnector->fake_enable)
2409 aconnector->fake_enable = false;
2411 amdgpu_dm_update_connector_after_detect(aconnector);
2414 drm_modeset_lock_all(dev);
2415 dm_restore_drm_connector_state(dev, connector);
2416 drm_modeset_unlock_all(dev);
2418 drm_kms_helper_hotplug_event(dev);
2421 #ifdef CONFIG_DRM_AMD_DC_HDCP
2422 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2423 if (adev->dm.hdcp_workqueue)
2424 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2427 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2428 (dc_link->type == dc_connection_mst_branch))
2429 dm_handle_hpd_rx_irq(aconnector);
2431 if (dc_link->type != dc_connection_mst_branch) {
2432 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2433 mutex_unlock(&aconnector->hpd_lock);
2437 static void register_hpd_handlers(struct amdgpu_device *adev)
2439 struct drm_device *dev = adev->ddev;
2440 struct drm_connector *connector;
2441 struct amdgpu_dm_connector *aconnector;
2442 const struct dc_link *dc_link;
2443 struct dc_interrupt_params int_params = {0};
2445 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2446 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2448 list_for_each_entry(connector,
2449 &dev->mode_config.connector_list, head) {
2451 aconnector = to_amdgpu_dm_connector(connector);
2452 dc_link = aconnector->dc_link;
2454 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2455 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2456 int_params.irq_source = dc_link->irq_source_hpd;
2458 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2460 (void *) aconnector);
2463 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2465 /* Also register for DP short pulse (hpd_rx). */
2466 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2467 int_params.irq_source = dc_link->irq_source_hpd_rx;
2469 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2471 (void *) aconnector);
2476 /* Register IRQ sources and initialize IRQ callbacks */
2477 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2479 struct dc *dc = adev->dm.dc;
2480 struct common_irq_params *c_irq_params;
2481 struct dc_interrupt_params int_params = {0};
2484 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2486 if (adev->asic_type >= CHIP_VEGA10)
2487 client_id = SOC15_IH_CLIENTID_DCE;
2489 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2490 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2493 * Actions of amdgpu_irq_add_id():
2494 * 1. Register a set() function with base driver.
2495 * Base driver will call set() function to enable/disable an
2496 * interrupt in DC hardware.
2497 * 2. Register amdgpu_dm_irq_handler().
2498 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2499 * coming from DC hardware.
2500 * amdgpu_dm_irq_handler() will redirect the interrupt to DC
2501 * for acknowledging and handling. */
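/*
 * Minimal sketch of the registration pattern used below (a simplified
 * summary of the loops that follow, not additional driver code):
 *
 *   r = amdgpu_irq_add_id(adev, client_id, src_id, &adev->crtc_irq);
 *   int_params.irq_source = dc_interrupt_to_irq_source(dc, src_id, 0);
 *   amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *                                    dm_crtc_high_irq, c_irq_params);
 */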
2503 /* Use VBLANK interrupt */
2504 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2505 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2507 DRM_ERROR("Failed to add crtc irq id!\n");
2511 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2512 int_params.irq_source =
2513 dc_interrupt_to_irq_source(dc, i, 0);
2515 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2517 c_irq_params->adev = adev;
2518 c_irq_params->irq_src = int_params.irq_source;
2520 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2521 dm_crtc_high_irq, c_irq_params);
2524 /* Use VUPDATE interrupt */
2525 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2526 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2528 DRM_ERROR("Failed to add vupdate irq id!\n");
2532 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2533 int_params.irq_source =
2534 dc_interrupt_to_irq_source(dc, i, 0);
2536 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2538 c_irq_params->adev = adev;
2539 c_irq_params->irq_src = int_params.irq_source;
2541 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2542 dm_vupdate_high_irq, c_irq_params);
2545 /* Use GRPH_PFLIP interrupt */
2546 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2547 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2548 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2550 DRM_ERROR("Failed to add page flip irq id!\n");
2554 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2555 int_params.irq_source =
2556 dc_interrupt_to_irq_source(dc, i, 0);
2558 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2560 c_irq_params->adev = adev;
2561 c_irq_params->irq_src = int_params.irq_source;
2563 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2564 dm_pflip_high_irq, c_irq_params);
2569 r = amdgpu_irq_add_id(adev, client_id,
2570 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2572 DRM_ERROR("Failed to add hpd irq id!\n");
2576 register_hpd_handlers(adev);
2581 #if defined(CONFIG_DRM_AMD_DC_DCN)
2582 /* Register IRQ sources and initialize IRQ callbacks */
2583 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2585 struct dc *dc = adev->dm.dc;
2586 struct common_irq_params *c_irq_params;
2587 struct dc_interrupt_params int_params = {0};
2591 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2592 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2595 * Actions of amdgpu_irq_add_id():
2596 * 1. Register a set() function with base driver.
2597 * Base driver will call set() function to enable/disable an
2598 * interrupt in DC hardware.
2599 * 2. Register amdgpu_dm_irq_handler().
2600 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2601 * coming from DC hardware.
2602 * amdgpu_dm_irq_handler() will redirect the interrupt to DC
2603 * for acknowledging and handling.
2606 /* Use VSTARTUP interrupt */
2607 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2608 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2610 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2613 DRM_ERROR("Failed to add crtc irq id!\n");
2617 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2618 int_params.irq_source =
2619 dc_interrupt_to_irq_source(dc, i, 0);
2621 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2623 c_irq_params->adev = adev;
2624 c_irq_params->irq_src = int_params.irq_source;
2626 amdgpu_dm_irq_register_interrupt(
2627 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2630 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2631 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2632 * to trigger at end of each vblank, regardless of state of the lock,
2633 * matching DCE behaviour.
2635 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2636 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2638 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2641 DRM_ERROR("Failed to add vupdate irq id!\n");
2645 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2646 int_params.irq_source =
2647 dc_interrupt_to_irq_source(dc, i, 0);
2649 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2651 c_irq_params->adev = adev;
2652 c_irq_params->irq_src = int_params.irq_source;
2654 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2655 dm_vupdate_high_irq, c_irq_params);
2658 /* Use GRPH_PFLIP interrupt */
2659 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2660 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2662 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2664 DRM_ERROR("Failed to add page flip irq id!\n");
2668 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2669 int_params.irq_source =
2670 dc_interrupt_to_irq_source(dc, i, 0);
2672 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2674 c_irq_params->adev = adev;
2675 c_irq_params->irq_src = int_params.irq_source;
2677 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2678 dm_pflip_high_irq, c_irq_params);
2683 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2686 DRM_ERROR("Failed to add hpd irq id!\n");
2690 register_hpd_handlers(adev);
2697 * Acquires the lock for the atomic state object and returns
2698 * the new atomic state.
2700 * This should only be called during atomic check.
2702 static int dm_atomic_get_state(struct drm_atomic_state *state,
2703 struct dm_atomic_state **dm_state)
2705 struct drm_device *dev = state->dev;
2706 struct amdgpu_device *adev = dev->dev_private;
2707 struct amdgpu_display_manager *dm = &adev->dm;
2708 struct drm_private_state *priv_state;
2713 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2714 if (IS_ERR(priv_state))
2715 return PTR_ERR(priv_state);
2717 *dm_state = to_dm_atomic_state(priv_state);
2722 static struct dm_atomic_state *
2723 dm_atomic_get_new_state(struct drm_atomic_state *state)
2725 struct drm_device *dev = state->dev;
2726 struct amdgpu_device *adev = dev->dev_private;
2727 struct amdgpu_display_manager *dm = &adev->dm;
2728 struct drm_private_obj *obj;
2729 struct drm_private_state *new_obj_state;
2732 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2733 if (obj->funcs == dm->atomic_obj.funcs)
2734 return to_dm_atomic_state(new_obj_state);
2740 static struct dm_atomic_state *
2741 dm_atomic_get_old_state(struct drm_atomic_state *state)
2743 struct drm_device *dev = state->dev;
2744 struct amdgpu_device *adev = dev->dev_private;
2745 struct amdgpu_display_manager *dm = &adev->dm;
2746 struct drm_private_obj *obj;
2747 struct drm_private_state *old_obj_state;
2750 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2751 if (obj->funcs == dm->atomic_obj.funcs)
2752 return to_dm_atomic_state(old_obj_state);
2758 static struct drm_private_state *
2759 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2761 struct dm_atomic_state *old_state, *new_state;
2763 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2767 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2769 old_state = to_dm_atomic_state(obj->state);
2771 if (old_state && old_state->context)
2772 new_state->context = dc_copy_state(old_state->context);
2774 if (!new_state->context) {
2779 return &new_state->base;
2782 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2783 struct drm_private_state *state)
2785 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2787 if (dm_state && dm_state->context)
2788 dc_release_state(dm_state->context);
2793 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2794 .atomic_duplicate_state = dm_atomic_duplicate_state,
2795 .atomic_destroy_state = dm_atomic_destroy_state,
2798 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2800 struct dm_atomic_state *state;
2803 adev->mode_info.mode_config_initialized = true;
2805 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2806 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2808 adev->ddev->mode_config.max_width = 16384;
2809 adev->ddev->mode_config.max_height = 16384;
2811 adev->ddev->mode_config.preferred_depth = 24;
2812 adev->ddev->mode_config.prefer_shadow = 1;
2813 /* indicates support for immediate flip */
2814 adev->ddev->mode_config.async_page_flip = true;
2816 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2818 state = kzalloc(sizeof(*state), GFP_KERNEL);
2822 state->context = dc_create_state(adev->dm.dc);
2823 if (!state->context) {
2828 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2830 drm_atomic_private_obj_init(adev->ddev,
2831 &adev->dm.atomic_obj,
2833 &dm_atomic_state_funcs);
2835 r = amdgpu_display_modeset_create_props(adev);
2839 r = amdgpu_dm_audio_init(adev);
2846 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2847 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2848 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2850 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2851 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2853 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2855 #if defined(CONFIG_ACPI)
2856 struct amdgpu_dm_backlight_caps caps;
2858 if (dm->backlight_caps.caps_valid)
2861 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2862 if (caps.caps_valid) {
2863 dm->backlight_caps.caps_valid = true;
2864 if (caps.aux_support)
2866 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2867 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2869 dm->backlight_caps.min_input_signal =
2870 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2871 dm->backlight_caps.max_input_signal =
2872 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2875 if (dm->backlight_caps.aux_support)
2878 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2879 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2883 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2890 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2891 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2896 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2897 const uint32_t user_brightness)
2899 u32 min, max, conversion_pace;
2900 u32 brightness = user_brightness;
2905 if (!caps->aux_support) {
2906 max = caps->max_input_signal;
2907 min = caps->min_input_signal;
2909 * The brightness input is in the range 0-255.
2910 * It needs to be rescaled to lie between the
2911 * requested min and max input signal.
2912 * It also needs to be scaled up by 0x101 to
2913 * match the DC interface, which has a range of 0 to 0xffff.
2916 conversion_pace = 0x101;
2921 / AMDGPU_MAX_BL_LEVEL
2922 + min * conversion_pace;
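/*
 * Worked example (illustrative): with the defaults min = 12 and
 * max = 255, a user brightness of 0 maps to 12 * 0x101 = 3084 and a
 * user brightness of 255 maps to 255 * 0x101 = 0xffff, stretching the
 * 0-255 input onto the 16-bit range DC expects.
 */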
2925 * We are doing a linear interpolation here, which is OK but
2926 * does not provide the optimal result. We probably want
2927 * something close to the Perceptual Quantizer (PQ) curve.
2929 max = caps->aux_max_input_signal;
2930 min = caps->aux_min_input_signal;
2932 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2933 + user_brightness * max;
2934 // Multiply the value by 1000 since we use millinits
2936 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
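/*
 * Worked example (illustrative): user_brightness = 0 yields min * 1000
 * millinits and user_brightness = 255 yields max * 1000 millinits,
 * with a straight line between the two endpoints.
 */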
2943 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2945 struct amdgpu_display_manager *dm = bl_get_data(bd);
2946 struct amdgpu_dm_backlight_caps caps;
2947 struct dc_link *link = NULL;
2951 amdgpu_dm_update_backlight_caps(dm);
2952 caps = dm->backlight_caps;
2954 link = (struct dc_link *)dm->backlight_link;
2956 brightness = convert_brightness(&caps, bd->props.brightness);
2957 // Change brightness based on AUX property
2958 if (caps.aux_support)
2959 return set_backlight_via_aux(link, brightness);
2961 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2966 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2968 struct amdgpu_display_manager *dm = bl_get_data(bd);
2969 int ret = dc_link_get_backlight_level(dm->backlight_link);
2971 if (ret == DC_ERROR_UNEXPECTED)
2972 return bd->props.brightness;
2976 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2977 .options = BL_CORE_SUSPENDRESUME,
2978 .get_brightness = amdgpu_dm_backlight_get_brightness,
2979 .update_status = amdgpu_dm_backlight_update_status,
2983 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2986 struct backlight_properties props = { 0 };
2988 amdgpu_dm_update_backlight_caps(dm);
2990 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2991 props.brightness = AMDGPU_MAX_BL_LEVEL;
2992 props.type = BACKLIGHT_RAW;
2994 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2995 dm->adev->ddev->primary->index);
2997 dm->backlight_dev = backlight_device_register(bl_name,
2998 dm->adev->ddev->dev,
3000 &amdgpu_dm_backlight_ops,
3003 if (IS_ERR(dm->backlight_dev))
3004 DRM_ERROR("DM: Backlight registration failed!\n");
3006 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3011 static int initialize_plane(struct amdgpu_display_manager *dm,
3012 struct amdgpu_mode_info *mode_info, int plane_id,
3013 enum drm_plane_type plane_type,
3014 const struct dc_plane_cap *plane_cap)
3016 struct drm_plane *plane;
3017 unsigned long possible_crtcs;
3020 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3022 DRM_ERROR("KMS: Failed to allocate plane\n");
3025 plane->type = plane_type;
3028 * HACK: IGT tests expect that the primary plane for a CRTC
3029 * can only have one possible CRTC. Only expose support for
3030 * any CRTC if they're not going to be used as a primary plane
3031 * for a CRTC - like overlay or underlay planes.
3033 possible_crtcs = 1 << plane_id;
3034 if (plane_id >= dm->dc->caps.max_streams)
3035 possible_crtcs = 0xff;
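/*
 * Illustrative: primary plane 0 is exposed with possible_crtcs = 0x1
 * (CRTC 0 only), while an overlay created with a plane_id at or above
 * max_streams advertises all CRTCs via the 0xff mask.
 */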
3037 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3040 DRM_ERROR("KMS: Failed to initialize plane\n");
3046 mode_info->planes[plane_id] = plane;
3052 static void register_backlight_device(struct amdgpu_display_manager *dm,
3053 struct dc_link *link)
3055 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3056 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3058 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3059 link->type != dc_connection_none) {
3061 * Even if registration fails, we should continue with
3062 * DM initialization, because not having backlight control
3063 * is better than a black screen.
3065 amdgpu_dm_register_backlight_device(dm);
3067 if (dm->backlight_dev)
3068 dm->backlight_link = link;
3075 * In this architecture, the association
3076 * connector -> encoder -> crtc
3077 * is not really required. The crtc and connector will hold the
3078 * display_index as an abstraction to use with DAL component
3080 * Returns 0 on success
3082 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3084 struct amdgpu_display_manager *dm = &adev->dm;
3086 struct amdgpu_dm_connector *aconnector = NULL;
3087 struct amdgpu_encoder *aencoder = NULL;
3088 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3090 int32_t primary_planes;
3091 enum dc_connection_type new_connection_type = dc_connection_none;
3092 const struct dc_plane_cap *plane;
3094 link_cnt = dm->dc->caps.max_links;
3095 if (amdgpu_dm_mode_config_init(dm->adev)) {
3096 DRM_ERROR("DM: Failed to initialize mode config\n");
3100 /* There is one primary plane per CRTC */
3101 primary_planes = dm->dc->caps.max_streams;
3102 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3105 * Initialize primary planes, implicit planes for legacy IOCTLS.
3106 * Order is reversed to match iteration order in atomic check.
3108 for (i = (primary_planes - 1); i >= 0; i--) {
3109 plane = &dm->dc->caps.planes[i];
3111 if (initialize_plane(dm, mode_info, i,
3112 DRM_PLANE_TYPE_PRIMARY, plane)) {
3113 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3119 * Initialize overlay planes, index starting after primary planes.
3120 * These planes have a higher DRM index than the primary planes since
3121 * they should be considered as having a higher z-order.
3122 * Order is reversed to match iteration order in atomic check.
3124 * Only support DCN for now, and only expose one so we don't encourage
3125 * userspace to use up all the pipes.
3127 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3128 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3130 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3133 if (!plane->blends_with_above || !plane->blends_with_below)
3136 if (!plane->pixel_format_support.argb8888)
3139 if (initialize_plane(dm, NULL, primary_planes + i,
3140 DRM_PLANE_TYPE_OVERLAY, plane)) {
3141 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3145 /* Only create one overlay plane. */
3149 for (i = 0; i < dm->dc->caps.max_streams; i++)
3150 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3151 DRM_ERROR("KMS: Failed to initialize crtc\n");
3155 dm->display_indexes_num = dm->dc->caps.max_streams;
3157 /* Loop over all connectors on the board. */
3158 for (i = 0; i < link_cnt; i++) {
3159 struct dc_link *link = NULL;
3161 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3163 "KMS: Cannot support more than %d display indexes\n",
3164 AMDGPU_DM_MAX_DISPLAY_INDEX);
3168 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3172 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3176 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3177 DRM_ERROR("KMS: Failed to initialize encoder\n");
3181 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3182 DRM_ERROR("KMS: Failed to initialize connector\n");
3186 link = dc_get_link_at_index(dm->dc, i);
3188 if (!dc_link_detect_sink(link, &new_connection_type))
3189 DRM_ERROR("KMS: Failed to detect connector\n");
3191 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3192 emulated_link_detect(link);
3193 amdgpu_dm_update_connector_after_detect(aconnector);
3195 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3196 amdgpu_dm_update_connector_after_detect(aconnector);
3197 register_backlight_device(dm, link);
3198 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3199 amdgpu_dm_set_psr_caps(link);
3205 /* Software is initialized. Now we can register interrupt handlers. */
3206 switch (adev->asic_type) {
3216 case CHIP_POLARIS11:
3217 case CHIP_POLARIS10:
3218 case CHIP_POLARIS12:
3223 if (dce110_register_irq_handlers(dm->adev)) {
3224 DRM_ERROR("DM: Failed to initialize IRQ\n");
3228 #if defined(CONFIG_DRM_AMD_DC_DCN)
3234 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3235 case CHIP_SIENNA_CICHLID:
3236 case CHIP_NAVY_FLOUNDER:
3238 if (dcn10_register_irq_handlers(dm->adev)) {
3239 DRM_ERROR("DM: Failed to initialize IRQ\n");
3245 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3249 /* No userspace support. */
3250 dm->dc->debug.disable_tri_buf = true;
3260 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3262 drm_mode_config_cleanup(dm->ddev);
3263 drm_atomic_private_obj_fini(&dm->atomic_obj);
3267 /******************************************************************************
3268 * amdgpu_display_funcs functions
3269 *****************************************************************************/
3272 * dm_bandwidth_update - program display watermarks
3274 * @adev: amdgpu_device pointer
3276 * Calculate and program the display watermarks and line buffer allocation.
3278 static void dm_bandwidth_update(struct amdgpu_device *adev)
3280 /* TODO: implement later */
3283 static const struct amdgpu_display_funcs dm_display_funcs = {
3284 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3285 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3286 .backlight_set_level = NULL, /* never called for DC */
3287 .backlight_get_level = NULL, /* never called for DC */
3288 .hpd_sense = NULL,/* called unconditionally */
3289 .hpd_set_polarity = NULL, /* called unconditionally */
3290 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3291 .page_flip_get_scanoutpos =
3292 dm_crtc_get_scanoutpos,/* called unconditionally */
3293 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3294 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3297 #if defined(CONFIG_DEBUG_KERNEL_DC)
3299 static ssize_t s3_debug_store(struct device *device,
3300 struct device_attribute *attr,
3306 struct drm_device *drm_dev = dev_get_drvdata(device);
3307 struct amdgpu_device *adev = drm_dev->dev_private;
3309 ret = kstrtoint(buf, 0, &s3_state);
3314 drm_kms_helper_hotplug_event(adev->ddev);
3319 return ret == 0 ? count : 0;
3322 DEVICE_ATTR_WO(s3_debug);
3326 static int dm_early_init(void *handle)
3328 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3330 switch (adev->asic_type) {
3333 adev->mode_info.num_crtc = 6;
3334 adev->mode_info.num_hpd = 6;
3335 adev->mode_info.num_dig = 6;
3338 adev->mode_info.num_crtc = 4;
3339 adev->mode_info.num_hpd = 6;
3340 adev->mode_info.num_dig = 7;
3344 adev->mode_info.num_crtc = 2;
3345 adev->mode_info.num_hpd = 6;
3346 adev->mode_info.num_dig = 6;
3350 adev->mode_info.num_crtc = 6;
3351 adev->mode_info.num_hpd = 6;
3352 adev->mode_info.num_dig = 7;
3355 adev->mode_info.num_crtc = 3;
3356 adev->mode_info.num_hpd = 6;
3357 adev->mode_info.num_dig = 9;
3360 adev->mode_info.num_crtc = 2;
3361 adev->mode_info.num_hpd = 6;
3362 adev->mode_info.num_dig = 9;
3364 case CHIP_POLARIS11:
3365 case CHIP_POLARIS12:
3366 adev->mode_info.num_crtc = 5;
3367 adev->mode_info.num_hpd = 5;
3368 adev->mode_info.num_dig = 5;
3370 case CHIP_POLARIS10:
3372 adev->mode_info.num_crtc = 6;
3373 adev->mode_info.num_hpd = 6;
3374 adev->mode_info.num_dig = 6;
3379 adev->mode_info.num_crtc = 6;
3380 adev->mode_info.num_hpd = 6;
3381 adev->mode_info.num_dig = 6;
3383 #if defined(CONFIG_DRM_AMD_DC_DCN)
3385 adev->mode_info.num_crtc = 4;
3386 adev->mode_info.num_hpd = 4;
3387 adev->mode_info.num_dig = 4;
3392 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3393 case CHIP_SIENNA_CICHLID:
3394 case CHIP_NAVY_FLOUNDER:
3396 adev->mode_info.num_crtc = 6;
3397 adev->mode_info.num_hpd = 6;
3398 adev->mode_info.num_dig = 6;
3401 adev->mode_info.num_crtc = 5;
3402 adev->mode_info.num_hpd = 5;
3403 adev->mode_info.num_dig = 5;
3406 adev->mode_info.num_crtc = 4;
3407 adev->mode_info.num_hpd = 4;
3408 adev->mode_info.num_dig = 4;
3411 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3415 amdgpu_dm_set_irq_funcs(adev);
3417 if (adev->mode_info.funcs == NULL)
3418 adev->mode_info.funcs = &dm_display_funcs;
3421 * Note: Do NOT change adev->audio_endpt_rreg and
3422 * adev->audio_endpt_wreg because they are initialised in
3423 * amdgpu_device_init()
3425 #if defined(CONFIG_DEBUG_KERNEL_DC)
3428 &dev_attr_s3_debug);
3434 static bool modeset_required(struct drm_crtc_state *crtc_state,
3435 struct dc_stream_state *new_stream,
3436 struct dc_stream_state *old_stream)
3438 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3441 if (!crtc_state->enable)
3444 return crtc_state->active;
3447 static bool modereset_required(struct drm_crtc_state *crtc_state)
3449 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3452 return !crtc_state->enable || !crtc_state->active;
3455 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3457 drm_encoder_cleanup(encoder);
3461 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3462 .destroy = amdgpu_dm_encoder_destroy,
3466 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3467 struct dc_scaling_info *scaling_info)
3469 int scale_w, scale_h;
3471 memset(scaling_info, 0, sizeof(*scaling_info));
3473 /* Source is fixed 16.16 but we ignore mantissa for now... */
3474 scaling_info->src_rect.x = state->src_x >> 16;
3475 scaling_info->src_rect.y = state->src_y >> 16;
3477 scaling_info->src_rect.width = state->src_w >> 16;
3478 if (scaling_info->src_rect.width == 0)
3481 scaling_info->src_rect.height = state->src_h >> 16;
3482 if (scaling_info->src_rect.height == 0)
3485 scaling_info->dst_rect.x = state->crtc_x;
3486 scaling_info->dst_rect.y = state->crtc_y;
3488 if (state->crtc_w == 0)
3491 scaling_info->dst_rect.width = state->crtc_w;
3493 if (state->crtc_h == 0)
3496 scaling_info->dst_rect.height = state->crtc_h;
3498 /* DRM doesn't specify clipping on destination output. */
3499 scaling_info->clip_rect = scaling_info->dst_rect;
3501 /* TODO: Validate scaling per-format with DC plane caps */
3502 scale_w = scaling_info->dst_rect.width * 1000 /
3503 scaling_info->src_rect.width;
3505 if (scale_w < 250 || scale_w > 16000)
3508 scale_h = scaling_info->dst_rect.height * 1000 /
3509 scaling_info->src_rect.height;
3511 if (scale_h < 250 || scale_h > 16000)
3515 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3516 * assume reasonable defaults based on the format. */
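/*
 * Illustrative: the scale checks above are in tenths of a percent, so
 * the accepted range 250..16000 corresponds to 0.25x downscaling up to
 * 16x upscaling; e.g. a 1920-wide source into a 960-wide destination
 * gives scale_w = 960 * 1000 / 1920 = 500 and passes.
 */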
3522 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3523 uint64_t *tiling_flags, bool *tmz_surface)
3525 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3526 int r = amdgpu_bo_reserve(rbo, false);
3529 /* Don't show error message when returning -ERESTARTSYS */
3530 if (r != -ERESTARTSYS)
3531 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3536 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3539 *tmz_surface = amdgpu_bo_encrypted(rbo);
3541 amdgpu_bo_unreserve(rbo);
3546 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3548 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3550 return offset ? (address + offset * 256) : 0;
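/*
 * Illustrative example: a DCC_OFFSET_256B value of 0x400 in the tiling
 * flags places the DCC metadata at address + 0x400 * 256 = address +
 * 0x40000, while an offset of 0 means the surface carries no DCC
 * metadata at all.
 */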
3554 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3555 const struct amdgpu_framebuffer *afb,
3556 const enum surface_pixel_format format,
3557 const enum dc_rotation_angle rotation,
3558 const struct plane_size *plane_size,
3559 const union dc_tiling_info *tiling_info,
3560 const uint64_t info,
3561 struct dc_plane_dcc_param *dcc,
3562 struct dc_plane_address *address,
3563 bool force_disable_dcc)
3565 struct dc *dc = adev->dm.dc;
3566 struct dc_dcc_surface_param input;
3567 struct dc_surface_dcc_cap output;
3568 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3569 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3570 uint64_t dcc_address;
3572 memset(&input, 0, sizeof(input));
3573 memset(&output, 0, sizeof(output));
3575 if (force_disable_dcc)
3581 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3584 if (!dc->cap_funcs.get_dcc_compression_cap)
3587 input.format = format;
3588 input.surface_size.width = plane_size->surface_size.width;
3589 input.surface_size.height = plane_size->surface_size.height;
3590 input.swizzle_mode = tiling_info->gfx9.swizzle;
3592 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3593 input.scan = SCAN_DIRECTION_HORIZONTAL;
3594 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3595 input.scan = SCAN_DIRECTION_VERTICAL;
3597 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3600 if (!output.capable)
3603 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3608 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3609 dcc->independent_64b_blks = i64b;
3611 dcc_address = get_dcc_address(afb->address, info);
3612 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3613 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3619 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3620 const struct amdgpu_framebuffer *afb,
3621 const enum surface_pixel_format format,
3622 const enum dc_rotation_angle rotation,
3623 const uint64_t tiling_flags,
3624 union dc_tiling_info *tiling_info,
3625 struct plane_size *plane_size,
3626 struct dc_plane_dcc_param *dcc,
3627 struct dc_plane_address *address,
3629 bool force_disable_dcc)
3631 const struct drm_framebuffer *fb = &afb->base;
3634 memset(tiling_info, 0, sizeof(*tiling_info));
3635 memset(plane_size, 0, sizeof(*plane_size));
3636 memset(dcc, 0, sizeof(*dcc));
3637 memset(address, 0, sizeof(*address));
3639 address->tmz_surface = tmz_surface;
3641 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3642 plane_size->surface_size.x = 0;
3643 plane_size->surface_size.y = 0;
3644 plane_size->surface_size.width = fb->width;
3645 plane_size->surface_size.height = fb->height;
3646 plane_size->surface_pitch =
3647 fb->pitches[0] / fb->format->cpp[0];
3649 address->type = PLN_ADDR_TYPE_GRAPHICS;
3650 address->grph.addr.low_part = lower_32_bits(afb->address);
3651 address->grph.addr.high_part = upper_32_bits(afb->address);
3652 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3653 uint64_t chroma_addr = afb->address + fb->offsets[1];
3655 plane_size->surface_size.x = 0;
3656 plane_size->surface_size.y = 0;
3657 plane_size->surface_size.width = fb->width;
3658 plane_size->surface_size.height = fb->height;
3659 plane_size->surface_pitch =
3660 fb->pitches[0] / fb->format->cpp[0];
3662 plane_size->chroma_size.x = 0;
3663 plane_size->chroma_size.y = 0;
3664 /* TODO: set these based on surface format */
3665 plane_size->chroma_size.width = fb->width / 2;
3666 plane_size->chroma_size.height = fb->height / 2;
3668 plane_size->chroma_pitch =
3669 fb->pitches[1] / fb->format->cpp[1];
3671 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3672 address->video_progressive.luma_addr.low_part =
3673 lower_32_bits(afb->address);
3674 address->video_progressive.luma_addr.high_part =
3675 upper_32_bits(afb->address);
3676 address->video_progressive.chroma_addr.low_part =
3677 lower_32_bits(chroma_addr);
3678 address->video_progressive.chroma_addr.high_part =
3679 upper_32_bits(chroma_addr);
3682 /* Fill GFX8 params */
3683 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3684 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3686 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3687 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3688 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3689 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3690 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3692 /* XXX fix me for VI */
3693 tiling_info->gfx8.num_banks = num_banks;
3694 tiling_info->gfx8.array_mode =
3695 DC_ARRAY_2D_TILED_THIN1;
3696 tiling_info->gfx8.tile_split = tile_split;
3697 tiling_info->gfx8.bank_width = bankw;
3698 tiling_info->gfx8.bank_height = bankh;
3699 tiling_info->gfx8.tile_aspect = mtaspect;
3700 tiling_info->gfx8.tile_mode =
3701 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3702 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3703 == DC_ARRAY_1D_TILED_THIN1) {
3704 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3707 tiling_info->gfx8.pipe_config =
3708 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3710 if (adev->asic_type == CHIP_VEGA10 ||
3711 adev->asic_type == CHIP_VEGA12 ||
3712 adev->asic_type == CHIP_VEGA20 ||
3713 adev->asic_type == CHIP_NAVI10 ||
3714 adev->asic_type == CHIP_NAVI14 ||
3715 adev->asic_type == CHIP_NAVI12 ||
3716 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3717 adev->asic_type == CHIP_SIENNA_CICHLID ||
3718 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3720 adev->asic_type == CHIP_RENOIR ||
3721 adev->asic_type == CHIP_RAVEN) {
3722 /* Fill GFX9 params */
3723 tiling_info->gfx9.num_pipes =
3724 adev->gfx.config.gb_addr_config_fields.num_pipes;
3725 tiling_info->gfx9.num_banks =
3726 adev->gfx.config.gb_addr_config_fields.num_banks;
3727 tiling_info->gfx9.pipe_interleave =
3728 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3729 tiling_info->gfx9.num_shader_engines =
3730 adev->gfx.config.gb_addr_config_fields.num_se;
3731 tiling_info->gfx9.max_compressed_frags =
3732 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3733 tiling_info->gfx9.num_rb_per_se =
3734 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3735 tiling_info->gfx9.swizzle =
3736 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3737 tiling_info->gfx9.shaderEnable = 1;
3739 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3740 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3741 adev->asic_type == CHIP_NAVY_FLOUNDER)
3742 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3744 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3745 plane_size, tiling_info,
3746 tiling_flags, dcc, address,
3756 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3757 bool *per_pixel_alpha, bool *global_alpha,
3758 int *global_alpha_value)
3760 *per_pixel_alpha = false;
3761 *global_alpha = false;
3762 *global_alpha_value = 0xff;
3764 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3767 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3768 static const uint32_t alpha_formats[] = {
3769 DRM_FORMAT_ARGB8888,
3770 DRM_FORMAT_RGBA8888,
3771 DRM_FORMAT_ABGR8888,
3773 uint32_t format = plane_state->fb->format->format;
3776 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3777 if (format == alpha_formats[i]) {
3778 *per_pixel_alpha = true;
3784 if (plane_state->alpha < 0xffff) {
3785 *global_alpha = true;
3786 *global_alpha_value = plane_state->alpha >> 8;
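/*
 * Illustrative: DRM stores plane alpha as 16 bits (0x0000-0xffff)
 * while DC takes 8 bits, so the shift maps e.g. 0x8080 to 0x80; only
 * a fully opaque 0xffff leaves global alpha disabled.
 */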
3791 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3792 const enum surface_pixel_format format,
3793 enum dc_color_space *color_space)
3797 *color_space = COLOR_SPACE_SRGB;
3799 /* DRM color properties only affect non-RGB formats. */
3800 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3803 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3805 switch (plane_state->color_encoding) {
3806 case DRM_COLOR_YCBCR_BT601:
3808 *color_space = COLOR_SPACE_YCBCR601;
3810 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3813 case DRM_COLOR_YCBCR_BT709:
3815 *color_space = COLOR_SPACE_YCBCR709;
3817 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3820 case DRM_COLOR_YCBCR_BT2020:
3822 *color_space = COLOR_SPACE_2020_YCBCR;
3835 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3836 const struct drm_plane_state *plane_state,
3837 const uint64_t tiling_flags,
3838 struct dc_plane_info *plane_info,
3839 struct dc_plane_address *address,
3841 bool force_disable_dcc)
3843 const struct drm_framebuffer *fb = plane_state->fb;
3844 const struct amdgpu_framebuffer *afb =
3845 to_amdgpu_framebuffer(plane_state->fb);
3846 struct drm_format_name_buf format_name;
3849 memset(plane_info, 0, sizeof(*plane_info));
3851 switch (fb->format->format) {
3853 plane_info->format =
3854 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3856 case DRM_FORMAT_RGB565:
3857 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3859 case DRM_FORMAT_XRGB8888:
3860 case DRM_FORMAT_ARGB8888:
3861 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3863 case DRM_FORMAT_XRGB2101010:
3864 case DRM_FORMAT_ARGB2101010:
3865 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3867 case DRM_FORMAT_XBGR2101010:
3868 case DRM_FORMAT_ABGR2101010:
3869 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3871 case DRM_FORMAT_XBGR8888:
3872 case DRM_FORMAT_ABGR8888:
3873 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3875 case DRM_FORMAT_NV21:
3876 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3878 case DRM_FORMAT_NV12:
3879 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3881 case DRM_FORMAT_P010:
3882 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3884 case DRM_FORMAT_XRGB16161616F:
3885 case DRM_FORMAT_ARGB16161616F:
3886 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3888 case DRM_FORMAT_XBGR16161616F:
3889 case DRM_FORMAT_ABGR16161616F:
3890 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3894 "Unsupported screen format %s\n",
3895 drm_get_format_name(fb->format->format, &format_name));
3899 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3900 case DRM_MODE_ROTATE_0:
3901 plane_info->rotation = ROTATION_ANGLE_0;
3903 case DRM_MODE_ROTATE_90:
3904 plane_info->rotation = ROTATION_ANGLE_90;
3906 case DRM_MODE_ROTATE_180:
3907 plane_info->rotation = ROTATION_ANGLE_180;
3909 case DRM_MODE_ROTATE_270:
3910 plane_info->rotation = ROTATION_ANGLE_270;
3913 plane_info->rotation = ROTATION_ANGLE_0;
3917 plane_info->visible = true;
3918 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3920 plane_info->layer_index = 0;
3922 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3923 &plane_info->color_space);
3927 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3928 plane_info->rotation, tiling_flags,
3929 &plane_info->tiling_info,
3930 &plane_info->plane_size,
3931 &plane_info->dcc, address, tmz_surface,
3936 fill_blending_from_plane_state(
3937 plane_state, &plane_info->per_pixel_alpha,
3938 &plane_info->global_alpha, &plane_info->global_alpha_value);
3943 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3944 struct dc_plane_state *dc_plane_state,
3945 struct drm_plane_state *plane_state,
3946 struct drm_crtc_state *crtc_state)
3948 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3949 const struct amdgpu_framebuffer *amdgpu_fb =
3950 to_amdgpu_framebuffer(plane_state->fb);
3951 struct dc_scaling_info scaling_info;
3952 struct dc_plane_info plane_info;
3953 uint64_t tiling_flags;
3955 bool tmz_surface = false;
3956 bool force_disable_dcc = false;
3958 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3962 dc_plane_state->src_rect = scaling_info.src_rect;
3963 dc_plane_state->dst_rect = scaling_info.dst_rect;
3964 dc_plane_state->clip_rect = scaling_info.clip_rect;
3965 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3967 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3971 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3972 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3974 &dc_plane_state->address,
3980 dc_plane_state->format = plane_info.format;
3981 dc_plane_state->color_space = plane_info.color_space;
3983 dc_plane_state->plane_size = plane_info.plane_size;
3984 dc_plane_state->rotation = plane_info.rotation;
3985 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3986 dc_plane_state->stereo_format = plane_info.stereo_format;
3987 dc_plane_state->tiling_info = plane_info.tiling_info;
3988 dc_plane_state->visible = plane_info.visible;
3989 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3990 dc_plane_state->global_alpha = plane_info.global_alpha;
3991 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3992 dc_plane_state->dcc = plane_info.dcc;
3993 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3996 * Always set input transfer function, since plane state is refreshed
3999 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4006 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4007 const struct dm_connector_state *dm_state,
4008 struct dc_stream_state *stream)
4010 enum amdgpu_rmx_type rmx_type;
4012 struct rect src = { 0 }; /* viewport in composition space */
4013 struct rect dst = { 0 }; /* stream addressable area */
4015 /* no mode. nothing to be done */
4019 /* Full screen scaling by default */
4020 src.width = mode->hdisplay;
4021 src.height = mode->vdisplay;
4022 dst.width = stream->timing.h_addressable;
4023 dst.height = stream->timing.v_addressable;
4026 rmx_type = dm_state->scaling;
4027 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4028 if (src.width * dst.height <
4029 src.height * dst.width) {
4030 /* height needs less upscaling/more downscaling */
4031 dst.width = src.width *
4032 dst.height / src.height;
4034 /* width needs less upscaling/more downscaling */
4035 dst.height = src.height *
4036 dst.width / src.width;
4038 } else if (rmx_type == RMX_CENTER) {
4042 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4043 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4045 if (dm_state->underscan_enable) {
4046 dst.x += dm_state->underscan_hborder / 2;
4047 dst.y += dm_state->underscan_vborder / 2;
4048 dst.width -= dm_state->underscan_hborder;
4049 dst.height -= dm_state->underscan_vborder;
4056 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4057 dst.x, dst.y, dst.width, dst.height);
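/*
 * Worked example (illustrative): scaling a 1280x1024 viewport onto a
 * 1920x1080 stream with RMX_ASPECT takes the height branch above
 * (1280 * 1080 < 1024 * 1920), so dst.width = 1280 * 1080 / 1024 =
 * 1350 and the image is centered at dst.x = (1920 - 1350) / 2 = 285.
 */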
4061 static enum dc_color_depth
4062 convert_color_depth_from_display_info(const struct drm_connector *connector,
4063 bool is_y420, int requested_bpc)
4070 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4071 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4073 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4075 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4078 bpc = (uint8_t)connector->display_info.bpc;
4079 /* Assume 8 bpc by default if no bpc is specified. */
4080 bpc = bpc ? bpc : 8;
4083 if (requested_bpc > 0) {
4085 * Cap display bpc based on the user requested value.
4087 * The value for state->max_bpc may not be correctly updated
4088 * depending on when the connector gets added to the state
4089 * or if this was called outside of atomic check, so it
4090 * can't be used directly.
4092 bpc = min_t(u8, bpc, requested_bpc);
4094 /* Round down to the nearest even number. */
4095 bpc = bpc - (bpc & 1);
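/*
 * Illustrative: a 12 bpc panel with requested_bpc = 9 gives
 * min(12, 9) = 9, which the rounding above turns into 8; with
 * requested_bpc = 10 the result stays at 10.
 */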
4101 * Temporary workaround: DRM doesn't parse color depth for
4102 * EDID revisions before 1.4.
4103 * TODO: Fix EDID parsing.
4105 return COLOR_DEPTH_888;
4107 return COLOR_DEPTH_666;
4109 return COLOR_DEPTH_888;
4111 return COLOR_DEPTH_101010;
4113 return COLOR_DEPTH_121212;
4115 return COLOR_DEPTH_141414;
4117 return COLOR_DEPTH_161616;
4119 return COLOR_DEPTH_UNDEFINED;
4123 static enum dc_aspect_ratio
4124 get_aspect_ratio(const struct drm_display_mode *mode_in)
4126 /* 1-1 mapping, since both enums follow the HDMI spec. */
4127 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4130 static enum dc_color_space
4131 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4133 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4135 switch (dc_crtc_timing->pixel_encoding) {
4136 case PIXEL_ENCODING_YCBCR422:
4137 case PIXEL_ENCODING_YCBCR444:
4138 case PIXEL_ENCODING_YCBCR420:
4141 * 27030 kHz is the separation point between HDTV and SDTV;
4142 * according to the HDMI spec, we use YCbCr709 and YCbCr601
4145 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4146 if (dc_crtc_timing->flags.Y_ONLY)
4148 COLOR_SPACE_YCBCR709_LIMITED;
4150 color_space = COLOR_SPACE_YCBCR709;
4152 if (dc_crtc_timing->flags.Y_ONLY)
4154 COLOR_SPACE_YCBCR601_LIMITED;
4156 color_space = COLOR_SPACE_YCBCR601;
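/*
 * Illustrative: 1080p60 (148.5 MHz, pix_clk_100hz = 1485000) lands
 * in the BT.709 branch above, while 480p (27 MHz, pix_clk_100hz =
 * 270000) stays under the 270300 threshold and gets BT.601.
 */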
4161 case PIXEL_ENCODING_RGB:
4162 color_space = COLOR_SPACE_SRGB;
4173 static bool adjust_colour_depth_from_display_info(
4174 struct dc_crtc_timing *timing_out,
4175 const struct drm_display_info *info)
4177 enum dc_color_depth depth = timing_out->display_color_depth;
4180 normalized_clk = timing_out->pix_clk_100hz / 10;
4181 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4182 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4183 normalized_clk /= 2;
4184 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
4186 case COLOR_DEPTH_888:
4188 case COLOR_DEPTH_101010:
4189 normalized_clk = (normalized_clk * 30) / 24;
4191 case COLOR_DEPTH_121212:
4192 normalized_clk = (normalized_clk * 36) / 24;
4194 case COLOR_DEPTH_161616:
4195 normalized_clk = (normalized_clk * 48) / 24;
4198 /* The above depths are the only ones valid for HDMI. */
4201 if (normalized_clk <= info->max_tmds_clock) {
4202 timing_out->display_color_depth = depth;
4205 } while (--depth > COLOR_DEPTH_666);
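/*
 * Worked example (illustrative, assuming a sink reporting
 * max_tmds_clock = 600000 kHz): a 594 MHz 4k60 mode needs
 * 594000 * 36 / 24 = 891000 at 12-bit and 594000 * 30 / 24 = 742500
 * at 10-bit, both too fast, so the loop settles on 8-bit where the
 * clock stays at 594000.
 */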
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

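/*
 * Copy the audio capabilities that DC parsed out of the sink's EDID into
 * the DC audio_info structure. Audio modes (from the EDID Short Audio
 * Descriptors) are only present for CEA revision 3 and later.
 */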
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

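/*
 * Pick the stream with the highest refresh rate as the multisync master.
 * The refresh rate is derived from the timing as
 *   refresh = (pix_clk_100hz * 100) / (h_total * v_total);
 * for instance, a 1485000 (in units of 100 Hz) pixel clock with a
 * 2200 x 1125 total gives 148500000 / 2475000 = 60 Hz.
 */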
static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && j != master_stream && stream_set[j]->triggered_crtc_reset.enabled)
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}

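/*
 * Build a dc_stream_state for @aconnector and @drm_mode. If the connector
 * has no dc_sink, a virtual (fake) sink is created so a stream can still
 * be constructed (e.g. for headless configurations). The caller owns the
 * returned stream and must release it with dc_stream_release().
 */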
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Should decide stream support vsc sdp colorimetry capability
		 * before building the vsc info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: destroy the dc_stream once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_params = cur->vrr_params;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: duplicate the dc_stream once the stream object is flattened */

	return &state->base;
}

static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

/* Implements only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

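/*
 * Translate a DRM property write into the matching field of the
 * dm_connector_state. Returns 0 on success, or -EINVAL for properties this
 * driver does not handle.
 */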
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

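/*
 * Reset the connector to its default atomic state: scaling off, underscan
 * disabled and a max requested bpc of 8; on eDP, the amdgpu_dm_abm_level
 * module-level default is applied as well.
 */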
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of a headless boot with the connector forced on for a DP
	 * managed link, these settings have to be != 0 to get an initial
	 * modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

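/*
 * Create a stream and validate it against DC, lowering the requested bpc
 * on failure and retrying. With the default max_requested_bpc of 8 the
 * attempt sequence would be 8 -> 6; starting from 10 it would be
 * 10 -> 8 -> 6 (values below 6 bpc are never tried).
 */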
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

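/*
 * Pack the connector's HDR output metadata into a DC info packet. The
 * packed Dynamic Range and Mastering infoframe is 30 bytes: a 4-byte
 * header followed by the fixed 26-byte static-metadata payload. For HDMI
 * the payload is emitted as an HDMI infoframe; for DP/eDP it is wrapped
 * in an SDP with the appropriate header bytes.
 */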
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}

static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, larger display is hotplugged while in fbcon mode, its
	 * higher-resolution modes are filtered out by drm_mode_validate_size()
	 * and end up missing once the user starts lightdm. The mode list
	 * therefore has to be rebuilt in the get_modes callback rather than
	 * just returning a mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = new_crtc_state->crtc->dev;
	struct drm_plane *plane;

	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			return true;
	}

	return false;
}

static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
					 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	dm_update_crtc_active_planes(crtc, state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
	if (state->enable && state->active &&
	    does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}

static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}

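/*
 * For MST connectors, compute the Payload Bandwidth Number (PBN) for the
 * adjusted mode and reserve VCPI slots for it. Roughly speaking, the PBN
 * is the stream bandwidth (pixel clock in kHz times bytes per pixel)
 * expressed in units of 54/64 MBps, with a small (0.6%) margin added by
 * drm_dp_calc_pbn_mode().
 */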
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

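/*
 * After DC has decided which MST streams get DSC, walk the connectors and
 * recompute their PBN/VCPI allocations: DSC streams use the compressed
 * bits_per_pixel from the DSC config, while non-DSC streams simply have
 * DSC disabled on their MST port.
 */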
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif

static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};

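/*
 * Prepare the framebuffer BO for scanout: reserve it, pin it into a
 * scanout-capable domain (VRAM, or GTT where supported), bind it into the
 * GART so it has a valid GPU address, and finally record the address and
 * tiling/DCC attributes in the DC plane state.
 */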
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint64_t tiling_flags;
	uint32_t domain;
	int r;
	bool tmz_surface = false;
	bool force_disable_dcc = false;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

	tmz_surface = amdgpu_bo_encrypted(rbo);

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			tiling_flags, &plane_state->tiling_info,
			&plane_state->plane_size, &plane_state->dcc,
			&plane_state->address, tmz_surface,
			force_disable_dcc);
	}

	return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	int max_downscale = 0;
	int max_upscale = INT_MAX;

	/* TODO: These should be checked against DC plane caps */
	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, max_downscale, max_upscale, true, true);
}

static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

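/*
 * Register @plane with DRM and attach the properties DC can honour for it:
 * per-pixel alpha and blend mode on overlays, YUV color encoding/range on
 * primaries that support NV12/P010, and 90/180/270 degree rotation.
 */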
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
					   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}

static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;
	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can advertise
		 * more than one preferred mode, and modes later in the probed
		 * list may have a higher preferred resolution: for example,
		 * 3840x2160 in the base EDID preferred timing and 4096x2160
		 * in a later DisplayID extension block.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

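/*
 * Common initialization for an amdgpu_dm_connector: set defaults, wire up
 * the dc_link, configure HPD polling per connector type, and attach the
 * scaling, underscan, max bpc, ABM, HDR metadata, VRR and (optionally)
 * HDCP content protection properties.
 */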
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot-plug support; connector->polled defaults to 0,
	 * which means hot plug via HPD is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

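/*
 * i2c transfer handler: translate the I2C messages into DC i2c payloads
 * and submit them over the link's DDC channel via dc_submit_i2c(). On
 * success, the number of processed messages is returned, matching the
 * i2c_algorithm contract.
 */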
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}

/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res)
		aconnector->i2c = NULL;

	return res;
}

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 0:
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = dev->dev_private;

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

6447 static void manage_dm_interrupts(struct amdgpu_device *adev,
6448 struct amdgpu_crtc *acrtc,
6452 * We have no guarantee that the frontend index maps to the same
6453 * backend index - some even map to more than one.
6455 * TODO: Use a different interrupt or check DC itself for the mapping.
6458 amdgpu_display_crtc_idx_to_irq_type(
6463 drm_crtc_vblank_on(&acrtc->base);
6466 &adev->pageflip_irq,
6472 &adev->pageflip_irq,
6474 drm_crtc_vblank_off(&acrtc->base);
6478 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6479 struct amdgpu_crtc *acrtc)
6482 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6485 * This reads the current IRQ state and forcibly reapplies
6486 * that setting to the hardware.
6488 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6492 is_scaling_state_different(const struct dm_connector_state *dm_state,
6493 const struct dm_connector_state *old_dm_state)
6495 if (dm_state->scaling != old_dm_state->scaling)
6497 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6498 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6500 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6501 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6503 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6504 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6509 #ifdef CONFIG_DRM_AMD_DC_HDCP
6510 static bool is_content_protection_different(struct drm_connector_state *state,
6511 const struct drm_connector_state *old_state,
6512 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6514 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6516 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6517 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6518 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6522 /* CP is being re-enabled, ignore this */
6523 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6524 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6525 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6529 /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6530 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6531 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6532 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6534 /* Check if something is connected/enabled; otherwise we would start HDCP
6535 * with nothing connected/enabled (hot-plug, headless S3, DPMS)
6537 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6538 aconnector->dc_sink != NULL)
6541 if (old_state->content_protection == state->content_protection)
6544 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6551 static void remove_stream(struct amdgpu_device *adev,
6552 struct amdgpu_crtc *acrtc,
6553 struct dc_stream_state *stream)
6555 /* this is the update mode case */
6557 acrtc->otg_inst = -1;
6558 acrtc->enabled = false;
6561 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6562 struct dc_cursor_position *position)
6564 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6566 int xorigin = 0, yorigin = 0;
6568 position->enable = false;
6572 if (!crtc || !plane->state->fb)
6575 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6576 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6577 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6579 plane->state->crtc_w,
6580 plane->state->crtc_h);
6584 x = plane->state->crtc_x;
6585 y = plane->state->crtc_y;
6587 if (x <= -amdgpu_crtc->max_cursor_width ||
6588 y <= -amdgpu_crtc->max_cursor_height)
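/*
 * Cursor partially off the top/left edge: the hardware cannot take a
 * negative position, so the overhang is folded into the hotspot instead
 * (paired with translate_by_source below).
 */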
6592 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6596 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6599 position->enable = true;
6600 position->translate_by_source = true;
6603 position->x_hotspot = xorigin;
6604 position->y_hotspot = yorigin;
6609 static void handle_cursor_update(struct drm_plane *plane,
6610 struct drm_plane_state *old_plane_state)
6612 struct amdgpu_device *adev = plane->dev->dev_private;
6613 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6614 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6615 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6616 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6617 uint64_t address = afb ? afb->address : 0;
6618 struct dc_cursor_position position;
6619 struct dc_cursor_attributes attributes;
6622 if (!plane->state->fb && !old_plane_state->fb)
6625 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6627 amdgpu_crtc->crtc_id,
6628 plane->state->crtc_w,
6629 plane->state->crtc_h);
6631 ret = get_cursor_position(plane, crtc, &position);
6635 if (!position.enable) {
6636 /* turn off cursor */
6637 if (crtc_state && crtc_state->stream) {
6638 mutex_lock(&adev->dm.dc_lock);
6639 dc_stream_set_cursor_position(crtc_state->stream,
6641 mutex_unlock(&adev->dm.dc_lock);
6646 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6647 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6649 memset(&attributes, 0, sizeof(attributes));
6650 attributes.address.high_part = upper_32_bits(address);
6651 attributes.address.low_part = lower_32_bits(address);
6652 attributes.width = plane->state->crtc_w;
6653 attributes.height = plane->state->crtc_h;
6654 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6655 attributes.rotation_angle = 0;
6656 attributes.attribute_flags.value = 0;
6658 attributes.pitch = attributes.width;
6660 if (crtc_state->stream) {
6661 mutex_lock(&adev->dm.dc_lock);
6662 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6664 DRM_ERROR("DC failed to set cursor attributes\n");
6666 if (!dc_stream_set_cursor_position(crtc_state->stream,
6668 DRM_ERROR("DC failed to set cursor position\n");
6669 mutex_unlock(&adev->dm.dc_lock);
6673 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6676 assert_spin_locked(&acrtc->base.dev->event_lock);
6677 WARN_ON(acrtc->event);
6679 acrtc->event = acrtc->base.state->event;
6681 /* Set the flip status */
6682 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6684 /* Mark this event as consumed */
6685 acrtc->base.state->event = NULL;
6687 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
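/*
 * Per-flip VRR bookkeeping: run the freesync pre-flip handling, adjust
 * vmin/vmax from the flip path on ASICs below AMDGPU_FAMILY_AI while VRR
 * is active, rebuild the VRR infopacket, and record whether the timing
 * or the packet actually changed.
 */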
6691 static void update_freesync_state_on_stream(
6692 struct amdgpu_display_manager *dm,
6693 struct dm_crtc_state *new_crtc_state,
6694 struct dc_stream_state *new_stream,
6695 struct dc_plane_state *surface,
6696 u32 flip_timestamp_in_us)
6698 struct mod_vrr_params vrr_params;
6699 struct dc_info_packet vrr_infopacket = {0};
6700 struct amdgpu_device *adev = dm->adev;
6701 unsigned long flags;
6707 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6708 * For now it's sufficient to just guard against these conditions.
6711 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6714 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6715 vrr_params = new_crtc_state->vrr_params;
6718 mod_freesync_handle_preflip(
6719 dm->freesync_module,
6722 flip_timestamp_in_us,
6725 if (adev->family < AMDGPU_FAMILY_AI &&
6726 amdgpu_dm_vrr_active(new_crtc_state)) {
6727 mod_freesync_handle_v_update(dm->freesync_module,
6728 new_stream, &vrr_params);
6730 /* Need to call this before the frame ends. */
6731 dc_stream_adjust_vmin_vmax(dm->dc,
6732 new_crtc_state->stream,
6733 &vrr_params.adjust);
6737 mod_freesync_build_vrr_infopacket(
6738 dm->freesync_module,
6742 TRANSFER_FUNC_UNKNOWN,
6745 new_crtc_state->freesync_timing_changed |=
6746 (memcmp(&new_crtc_state->vrr_params.adjust,
6748 sizeof(vrr_params.adjust)) != 0);
6750 new_crtc_state->freesync_vrr_info_changed |=
6751 (memcmp(&new_crtc_state->vrr_infopacket,
6753 sizeof(vrr_infopacket)) != 0);
6755 new_crtc_state->vrr_params = vrr_params;
6756 new_crtc_state->vrr_infopacket = vrr_infopacket;
6758 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6759 new_stream->vrr_infopacket = vrr_infopacket;
6761 if (new_crtc_state->freesync_vrr_info_changed)
6762 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6763 new_crtc_state->base.crtc->base.id,
6764 (int)new_crtc_state->base.vrr_enabled,
6765 (int)vrr_params.state);
6767 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6770 static void pre_update_freesync_state_on_stream(
6771 struct amdgpu_display_manager *dm,
6772 struct dm_crtc_state *new_crtc_state)
6774 struct dc_stream_state *new_stream = new_crtc_state->stream;
6775 struct mod_vrr_params vrr_params;
6776 struct mod_freesync_config config = new_crtc_state->freesync_config;
6777 struct amdgpu_device *adev = dm->adev;
6778 unsigned long flags;
6784 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6785 * For now it's sufficient to just guard against these conditions.
6787 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6790 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6791 vrr_params = new_crtc_state->vrr_params;
6793 if (new_crtc_state->vrr_supported &&
6794 config.min_refresh_in_uhz &&
6795 config.max_refresh_in_uhz) {
6796 config.state = new_crtc_state->base.vrr_enabled ?
6797 VRR_STATE_ACTIVE_VARIABLE :
6800 config.state = VRR_STATE_UNSUPPORTED;
6803 mod_freesync_build_vrr_params(dm->freesync_module,
6805 &config, &vrr_params);
6807 new_crtc_state->freesync_timing_changed |=
6808 (memcmp(&new_crtc_state->vrr_params.adjust,
6810 sizeof(vrr_params.adjust)) != 0);
6812 new_crtc_state->vrr_params = vrr_params;
6813 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6816 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6817 struct dm_crtc_state *new_state)
6819 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6820 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6822 if (!old_vrr_active && new_vrr_active) {
6823 /* Transition VRR inactive -> active:
6824 * While VRR is active, we must not disable the vblank irq, as a
6825 * re-enable after a disable would compute bogus vblank/pflip
6826 * timestamps if it happened inside the display front-porch.
6828 * We also need the vupdate irq for the actual core vblank handling at end of vblank.
6831 dm_set_vupdate_irq(new_state->base.crtc, true);
6832 drm_crtc_vblank_get(new_state->base.crtc);
6833 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6834 __func__, new_state->base.crtc->base.id);
6835 } else if (old_vrr_active && !new_vrr_active) {
6836 /* Transition VRR active -> inactive:
6837 * Allow vblank irq disable again for fixed refresh rate.
6839 dm_set_vupdate_irq(new_state->base.crtc, false);
6840 drm_crtc_vblank_put(new_state->base.crtc);
6841 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6842 __func__, new_state->base.crtc->base.id);
6846 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6848 struct drm_plane *plane;
6849 struct drm_plane_state *old_plane_state, *new_plane_state;
6853 * TODO: Make this per-stream so we don't issue redundant updates for
6854 * commits with multiple streams.
6856 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6858 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6859 handle_cursor_update(plane, old_plane_state);
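/*
 * Program all non-cursor planes for one CRTC: build a bundle of DC
 * surface updates, throttle the flip against the target vblank, arm the
 * pageflip event, and commit the updates to DC under dc_lock.
 */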
6862 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6863 struct dc_state *dc_state,
6864 struct drm_device *dev,
6865 struct amdgpu_display_manager *dm,
6866 struct drm_crtc *pcrtc,
6867 bool wait_for_vblank)
6870 uint64_t timestamp_ns;
6871 struct drm_plane *plane;
6872 struct drm_plane_state *old_plane_state, *new_plane_state;
6873 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6874 struct drm_crtc_state *new_pcrtc_state =
6875 drm_atomic_get_new_crtc_state(state, pcrtc);
6876 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6877 struct dm_crtc_state *dm_old_crtc_state =
6878 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6879 int planes_count = 0, vpos, hpos;
6881 unsigned long flags;
6882 struct amdgpu_bo *abo;
6883 uint64_t tiling_flags;
6884 bool tmz_surface = false;
6885 uint32_t target_vblank, last_flip_vblank;
6886 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6887 bool pflip_present = false;
6889 struct dc_surface_update surface_updates[MAX_SURFACES];
6890 struct dc_plane_info plane_infos[MAX_SURFACES];
6891 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6892 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6893 struct dc_stream_update stream_update;
6896 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6899 dm_error("Failed to allocate update bundle\n");
6904 * Disable the cursor first if we're disabling all the planes.
6905 * It'll remain on the screen after the planes are re-enabled if we don't.
6908 if (acrtc_state->active_planes == 0)
6909 amdgpu_dm_commit_cursors(state);
6911 /* update planes when needed */
6912 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6913 struct drm_crtc *crtc = new_plane_state->crtc;
6914 struct drm_crtc_state *new_crtc_state;
6915 struct drm_framebuffer *fb = new_plane_state->fb;
6916 bool plane_needs_flip;
6917 struct dc_plane_state *dc_plane;
6918 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6920 /* Cursor plane is handled after stream updates */
6921 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6924 if (!fb || !crtc || pcrtc != crtc)
6927 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6928 if (!new_crtc_state->active)
6931 dc_plane = dm_new_plane_state->dc_state;
6933 bundle->surface_updates[planes_count].surface = dc_plane;
6934 if (new_pcrtc_state->color_mgmt_changed) {
6935 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6936 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6937 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6940 fill_dc_scaling_info(new_plane_state,
6941 &bundle->scaling_infos[planes_count]);
6943 bundle->surface_updates[planes_count].scaling_info =
6944 &bundle->scaling_infos[planes_count];
6946 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6948 pflip_present = pflip_present || plane_needs_flip;
6950 if (!plane_needs_flip) {
6955 abo = gem_to_amdgpu_bo(fb->obj[0]);
6958 * Wait for all fences on this FB. Do a limited wait to avoid
6959 * deadlocking during GPU reset, when this fence will not signal
6960 * but we hold the reservation lock for the BO.
6962 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6964 msecs_to_jiffies(5000));
6965 if (unlikely(r <= 0))
6966 DRM_ERROR("Waiting for fences timed out!");
6969 * TODO: This might fail, so it is better not to rely on it; wait
6970 * explicitly on the fences instead. In general this should be
6971 * called for blocking commits too, as per the framework helpers.
6974 r = amdgpu_bo_reserve(abo, true);
6975 if (unlikely(r != 0))
6976 DRM_ERROR("failed to reserve buffer before flip\n");
6978 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6980 tmz_surface = amdgpu_bo_encrypted(abo);
6982 amdgpu_bo_unreserve(abo);
6984 fill_dc_plane_info_and_addr(
6985 dm->adev, new_plane_state, tiling_flags,
6986 &bundle->plane_infos[planes_count],
6987 &bundle->flip_addrs[planes_count].address,
6991 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6992 new_plane_state->plane->index,
6993 bundle->plane_infos[planes_count].dcc.enable);
6995 bundle->surface_updates[planes_count].plane_info =
6996 &bundle->plane_infos[planes_count];
6999 * Only allow immediate flips for fast updates that don't
7000 * change FB pitch, DCC state, rotation or mirroring.
7002 bundle->flip_addrs[planes_count].flip_immediate =
7003 crtc->state->async_flip &&
7004 acrtc_state->update_type == UPDATE_TYPE_FAST;
7006 timestamp_ns = ktime_get_ns();
7007 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7008 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7009 bundle->surface_updates[planes_count].surface = dc_plane;
7011 if (!bundle->surface_updates[planes_count].surface) {
7012 DRM_ERROR("No surface for CRTC: id=%d\n",
7013 acrtc_attach->crtc_id);
7017 if (plane == pcrtc->primary)
7018 update_freesync_state_on_stream(
7021 acrtc_state->stream,
7023 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7025 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7027 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7028 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7034 if (pflip_present) {
7036 /* Use old throttling in non-vrr fixed refresh rate mode
7037 * to keep flip scheduling based on target vblank counts
7038 * working in a backwards compatible way, e.g., for
7039 * clients using the GLX_OML_sync_control extension or
7040 * DRI3/Present extension with defined target_msc.
7042 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7045 /* For variable refresh rate mode only:
7046 * Get vblank of last completed flip to avoid > 1 vrr
7047 * flips per video frame by use of throttling, but allow
7048 * flip programming anywhere in the possibly large
7049 * variable vrr vblank interval for fine-grained flip
7050 * timing control and more opportunity to avoid stutter
7051 * on late submission of flips.
7053 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7054 last_flip_vblank = acrtc_attach->last_flip_vblank;
7055 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7058 target_vblank = last_flip_vblank + wait_for_vblank;
7061 * Wait until we're out of the vertical blank period before the one
7062 * targeted by the flip
7064 while ((acrtc_attach->enabled &&
7065 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7066 0, &vpos, &hpos, NULL,
7067 NULL, &pcrtc->hwmode)
7068 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7069 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7070 (int)(target_vblank -
7071 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7072 usleep_range(1000, 1100);
7076 * Prepare the flip event for the pageflip interrupt to handle.
7078 * This only works in the case where we've already turned on the
7079 * appropriate hardware blocks (e.g. HUBP) so in the transition case
7080 * from 0 -> n planes we have to skip a hardware generated event
7081 * and rely on sending it from software.
7083 if (acrtc_attach->base.state->event &&
7084 acrtc_state->active_planes > 0) {
7085 drm_crtc_vblank_get(pcrtc);
7087 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7089 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7090 prepare_flip_isr(acrtc_attach);
7092 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7095 if (acrtc_state->stream) {
7096 if (acrtc_state->freesync_vrr_info_changed)
7097 bundle->stream_update.vrr_infopacket =
7098 &acrtc_state->stream->vrr_infopacket;
7102 /* Update the planes if changed or disable if we don't have any. */
7103 if ((planes_count || acrtc_state->active_planes == 0) &&
7104 acrtc_state->stream) {
7105 bundle->stream_update.stream = acrtc_state->stream;
7106 if (new_pcrtc_state->mode_changed) {
7107 bundle->stream_update.src = acrtc_state->stream->src;
7108 bundle->stream_update.dst = acrtc_state->stream->dst;
7111 if (new_pcrtc_state->color_mgmt_changed) {
7113 * TODO: This isn't fully correct since we've actually
7114 * already modified the stream in place.
7116 bundle->stream_update.gamut_remap =
7117 &acrtc_state->stream->gamut_remap_matrix;
7118 bundle->stream_update.output_csc_transform =
7119 &acrtc_state->stream->csc_color_matrix;
7120 bundle->stream_update.out_transfer_func =
7121 acrtc_state->stream->out_transfer_func;
7124 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7125 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7126 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7129 * If FreeSync state on the stream has changed then we need to
7130 * re-adjust the min/max bounds now that DC doesn't handle this
7131 * as part of commit.
7133 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7134 amdgpu_dm_vrr_active(acrtc_state)) {
7135 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7136 dc_stream_adjust_vmin_vmax(
7137 dm->dc, acrtc_state->stream,
7138 &acrtc_state->vrr_params.adjust);
7139 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7141 mutex_lock(&dm->dc_lock);
7142 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7143 acrtc_state->stream->link->psr_settings.psr_allow_active)
7144 amdgpu_dm_psr_disable(acrtc_state->stream);
7146 dc_commit_updates_for_stream(dm->dc,
7147 bundle->surface_updates,
7149 acrtc_state->stream,
7150 &bundle->stream_update,
7154 * Enable or disable the interrupts on the backend.
7156 * Most pipes are put into power gating when unused.
7158 * When power gating is enabled on a pipe, its interrupt
7159 * enablement state is lost by the time power gating is disabled again.
7161 * So we need to update the IRQ control state in hardware
7162 * whenever the pipe turns on (since it could be previously
7163 * power gated) or off (since some pipes can't be power gated
7166 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7167 dm_update_pflip_irq_state(
7168 (struct amdgpu_device *)dev->dev_private,
7171 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7172 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7173 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7174 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7175 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7176 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7177 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7178 amdgpu_dm_psr_enable(acrtc_state->stream);
7181 mutex_unlock(&dm->dc_lock);
7185 * Update cursor state *after* programming all the planes.
7186 * This avoids redundant programming in the case where we're going
7187 * to be disabling a single plane - those pipes are being disabled.
7189 if (acrtc_state->active_planes)
7190 amdgpu_dm_commit_cursors(state);
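/*
 * Propagate audio changes to the DRM audio component: notify removals
 * first for connectors that changed CRTCs, then notify additions for
 * streams that went through a modeset.
 */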
7196 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7197 struct drm_atomic_state *state)
7199 struct amdgpu_device *adev = dev->dev_private;
7200 struct amdgpu_dm_connector *aconnector;
7201 struct drm_connector *connector;
7202 struct drm_connector_state *old_con_state, *new_con_state;
7203 struct drm_crtc_state *new_crtc_state;
7204 struct dm_crtc_state *new_dm_crtc_state;
7205 const struct dc_stream_status *status;
7208 /* Notify device removals. */
7209 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7210 if (old_con_state->crtc != new_con_state->crtc) {
7211 /* CRTC changes require notification. */
7215 if (!new_con_state->crtc)
7218 new_crtc_state = drm_atomic_get_new_crtc_state(
7219 state, new_con_state->crtc);
7221 if (!new_crtc_state)
7224 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7228 aconnector = to_amdgpu_dm_connector(connector);
7230 mutex_lock(&adev->dm.audio_lock);
7231 inst = aconnector->audio_inst;
7232 aconnector->audio_inst = -1;
7233 mutex_unlock(&adev->dm.audio_lock);
7235 amdgpu_dm_audio_eld_notify(adev, inst);
7238 /* Notify audio device additions. */
7239 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7240 if (!new_con_state->crtc)
7243 new_crtc_state = drm_atomic_get_new_crtc_state(
7244 state, new_con_state->crtc);
7246 if (!new_crtc_state)
7249 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7252 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7253 if (!new_dm_crtc_state->stream)
7256 status = dc_stream_get_status(new_dm_crtc_state->stream);
7260 aconnector = to_amdgpu_dm_connector(connector);
7262 mutex_lock(&adev->dm.audio_lock);
7263 inst = status->audio_inst;
7264 aconnector->audio_inst = inst;
7265 mutex_unlock(&adev->dm.audio_lock);
7267 amdgpu_dm_audio_eld_notify(adev, inst);
7272 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7273 * @crtc_state: the DRM CRTC state
7274 * @stream_state: the DC stream state.
7276 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7277 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7279 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7280 struct dc_stream_state *stream_state)
7282 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7285 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7286 struct drm_atomic_state *state,
7289 struct drm_crtc *crtc;
7290 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7291 struct amdgpu_device *adev = dev->dev_private;
7295 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7296 * a modeset, being disabled, or have no active planes.
7298 * It's done in atomic commit rather than commit tail for now since
7299 * some of these interrupt handlers access the current CRTC state and
7300 * potentially the stream pointer itself.
7302 * Since the atomic state is swapped within atomic commit and not within
7303 * commit tail, this would lead to the new state (that hasn't been committed yet)
7304 * being accessed from within the handlers.
7306 * TODO: Fix this so we can do this in commit tail and not have to block in atomic check.
7309 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7310 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7312 if (old_crtc_state->active &&
7313 (!new_crtc_state->active ||
7314 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7315 manage_dm_interrupts(adev, acrtc, false);
7318 * Add a check here for SoCs that support a hardware cursor plane, to
7319 * unset legacy_cursor_update
7322 return drm_atomic_helper_commit(dev, state, nonblock);
7324 /* TODO: Handle EINTR, re-enable IRQ */
7328 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7329 * @state: The atomic state to commit
7331 * This will tell DC to commit the constructed DC state from atomic_check,
7332 * programming the hardware. Any failure here implies a hardware failure, since
7333 * atomic check should have filtered anything non-kosher.
7335 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7337 struct drm_device *dev = state->dev;
7338 struct amdgpu_device *adev = dev->dev_private;
7339 struct amdgpu_display_manager *dm = &adev->dm;
7340 struct dm_atomic_state *dm_state;
7341 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7343 struct drm_crtc *crtc;
7344 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7345 unsigned long flags;
7346 bool wait_for_vblank = true;
7347 struct drm_connector *connector;
7348 struct drm_connector_state *old_con_state, *new_con_state;
7349 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7350 int crtc_disable_count = 0;
7352 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7354 dm_state = dm_atomic_get_new_state(state);
7355 if (dm_state && dm_state->context) {
7356 dc_state = dm_state->context;
7358 /* No state changes, retain current state. */
7359 dc_state_temp = dc_create_state(dm->dc);
7360 ASSERT(dc_state_temp);
7361 dc_state = dc_state_temp;
7362 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7365 /* update changed items */
7366 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7367 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7369 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7370 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7373 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7374 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7375 "connectors_changed:%d\n",
7377 new_crtc_state->enable,
7378 new_crtc_state->active,
7379 new_crtc_state->planes_changed,
7380 new_crtc_state->mode_changed,
7381 new_crtc_state->active_changed,
7382 new_crtc_state->connectors_changed);
7384 /* Copy all transient state flags into dc state */
7385 if (dm_new_crtc_state->stream) {
7386 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7387 dm_new_crtc_state->stream);
7390 /* handles headless hotplug case, updating new_state and
7391 * aconnector as needed
7394 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7396 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7398 if (!dm_new_crtc_state->stream) {
7400 * This could happen because of issues with
7401 * userspace notification delivery.
7402 * In this case userspace tries to set a mode on
7403 * a display which is in fact disconnected.
7404 * dc_sink is NULL on the aconnector in this case.
7405 * We expect a mode reset to come soon.
7407 * This can also happen when an unplug occurs
7408 * while the resume sequence is still in progress.
7410 * In this case, we want to pretend we still
7411 * have a sink to keep the pipe running, so that
7412 * hw state stays consistent with the sw state.
7414 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7415 __func__, acrtc->base.base.id);
7419 if (dm_old_crtc_state->stream)
7420 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7422 pm_runtime_get_noresume(dev->dev);
7424 acrtc->enabled = true;
7425 acrtc->hw_mode = new_crtc_state->mode;
7426 crtc->hwmode = new_crtc_state->mode;
7427 } else if (modereset_required(new_crtc_state)) {
7428 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7429 /* i.e. reset mode */
7430 if (dm_old_crtc_state->stream) {
7431 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7432 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7434 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7437 } /* for_each_crtc_in_state() */
7440 dm_enable_per_frame_crtc_master_sync(dc_state);
7441 mutex_lock(&dm->dc_lock);
7442 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7443 mutex_unlock(&dm->dc_lock);
7446 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7447 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7449 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7451 if (dm_new_crtc_state->stream != NULL) {
7452 const struct dc_stream_status *status =
7453 dc_stream_get_status(dm_new_crtc_state->stream);
7456 status = dc_stream_get_status_from_state(dc_state,
7457 dm_new_crtc_state->stream);
7460 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
7462 acrtc->otg_inst = status->primary_otg_inst;
7465 #ifdef CONFIG_DRM_AMD_DC_HDCP
7466 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7467 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7468 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7469 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7471 new_crtc_state = NULL;
7474 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7476 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7478 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7479 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7480 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7481 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7485 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7486 hdcp_update_display(
7487 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7488 new_con_state->hdcp_content_type,
7489 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7494 /* Handle connector state changes */
7495 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7496 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7497 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7498 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7499 struct dc_surface_update dummy_updates[MAX_SURFACES];
7500 struct dc_stream_update stream_update;
7501 struct dc_info_packet hdr_packet;
7502 struct dc_stream_status *status = NULL;
7503 bool abm_changed, hdr_changed, scaling_changed;
7505 memset(&dummy_updates, 0, sizeof(dummy_updates));
7506 memset(&stream_update, 0, sizeof(stream_update));
7509 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7510 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7513 /* Skip any modesets/resets */
7514 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7517 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7518 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7520 scaling_changed = is_scaling_state_different(dm_new_con_state,
7523 abm_changed = dm_new_crtc_state->abm_level !=
7524 dm_old_crtc_state->abm_level;
7527 is_hdr_metadata_different(old_con_state, new_con_state);
7529 if (!scaling_changed && !abm_changed && !hdr_changed)
7532 stream_update.stream = dm_new_crtc_state->stream;
7533 if (scaling_changed) {
7534 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7535 dm_new_con_state, dm_new_crtc_state->stream);
7537 stream_update.src = dm_new_crtc_state->stream->src;
7538 stream_update.dst = dm_new_crtc_state->stream->dst;
7542 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7544 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7548 fill_hdr_info_packet(new_con_state, &hdr_packet);
7549 stream_update.hdr_static_metadata = &hdr_packet;
7552 status = dc_stream_get_status(dm_new_crtc_state->stream);
7554 WARN_ON(!status->plane_count);
7557 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7558 * Here we create an empty update on each plane.
7559 * To fix this, DC should permit updating only stream properties.
7561 for (j = 0; j < status->plane_count; j++)
7562 dummy_updates[j].surface = status->plane_states[0];
7565 mutex_lock(&dm->dc_lock);
7566 dc_commit_updates_for_stream(dm->dc,
7568 status->plane_count,
7569 dm_new_crtc_state->stream,
7572 mutex_unlock(&dm->dc_lock);
7575 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7576 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7577 new_crtc_state, i) {
7578 if (old_crtc_state->active && !new_crtc_state->active)
7579 crtc_disable_count++;
7581 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7582 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7584 /* Update freesync active state. */
7585 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7587 /* Handle vrr on->off / off->on transitions */
7588 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7593 * Enable interrupts for CRTCs that are newly enabled or went through
7594 * a modeset. It was intentionally deferred until after the front end
7595 * state was modified to wait until the OTG was on and so the IRQ
7596 * handlers didn't access stale or invalid state.
7598 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7599 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7601 if (new_crtc_state->active &&
7602 (!old_crtc_state->active ||
7603 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7604 manage_dm_interrupts(adev, acrtc, true);
7605 #ifdef CONFIG_DEBUG_FS
7607 * Frontend may have changed so reapply the CRC capture
7608 * settings for the stream.
7610 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7612 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7613 amdgpu_dm_crtc_configure_crc_source(
7614 crtc, dm_new_crtc_state,
7615 dm_new_crtc_state->crc_src);
7621 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7622 if (new_crtc_state->async_flip)
7623 wait_for_vblank = false;
7625 /* update planes when needed per crtc*/
7626 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7627 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7629 if (dm_new_crtc_state->stream)
7630 amdgpu_dm_commit_planes(state, dc_state, dev,
7631 dm, crtc, wait_for_vblank);
7634 /* Update audio instances for each connector. */
7635 amdgpu_dm_commit_audio(dev, state);
7638 * Send a vblank event for all events not handled in flip, and
7639 * mark consumed events for drm_atomic_helper_commit_hw_done()
7641 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7642 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7644 if (new_crtc_state->event)
7645 drm_send_event_locked(dev, &new_crtc_state->event->base);
7647 new_crtc_state->event = NULL;
7649 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7651 /* Signal HW programming completion */
7652 drm_atomic_helper_commit_hw_done(state);
7654 if (wait_for_vblank)
7655 drm_atomic_helper_wait_for_flip_done(dev, state);
7657 drm_atomic_helper_cleanup_planes(dev, state);
7660 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7661 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
7664 for (i = 0; i < crtc_disable_count; i++)
7665 pm_runtime_put_autosuspend(dev->dev);
7666 pm_runtime_mark_last_busy(dev->dev);
7669 dc_release_state(dc_state_temp);
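/*
 * Build and commit a minimal atomic state (connector, CRTC and primary
 * plane) with mode_changed forced, so the previous display configuration
 * is restored without userspace involvement.
 */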
7673 static int dm_force_atomic_commit(struct drm_connector *connector)
7676 struct drm_device *ddev = connector->dev;
7677 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7678 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7679 struct drm_plane *plane = disconnected_acrtc->base.primary;
7680 struct drm_connector_state *conn_state;
7681 struct drm_crtc_state *crtc_state;
7682 struct drm_plane_state *plane_state;
7687 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7689 /* Construct an atomic state to restore the previous display settings */
7692 * Attach connectors to drm_atomic_state
7694 conn_state = drm_atomic_get_connector_state(state, connector);
7696 ret = PTR_ERR_OR_ZERO(conn_state);
7700 /* Attach crtc to drm_atomic_state*/
7701 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7703 ret = PTR_ERR_OR_ZERO(crtc_state);
7707 /* force a restore */
7708 crtc_state->mode_changed = true;
7710 /* Attach plane to drm_atomic_state */
7711 plane_state = drm_atomic_get_plane_state(state, plane);
7713 ret = PTR_ERR_OR_ZERO(plane_state);
7718 /* Call commit internally with the state we just constructed */
7719 ret = drm_atomic_commit(state);
7724 DRM_ERROR("Restoring old state failed with %i\n", ret);
7725 drm_atomic_state_put(state);
7731 * This function handles all cases when a set mode does not come upon hotplug.
7732 * This includes when a display is unplugged and then plugged back into the
7733 * same port, and when running without usermode desktop manager support.
7735 void dm_restore_drm_connector_state(struct drm_device *dev,
7736 struct drm_connector *connector)
7738 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7739 struct amdgpu_crtc *disconnected_acrtc;
7740 struct dm_crtc_state *acrtc_state;
7742 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7745 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7746 if (!disconnected_acrtc)
7749 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7750 if (!acrtc_state->stream)
7754 * If the previous sink is not released and different from the current,
7755 * we deduce we are in a state where we cannot rely on a usermode call
7756 * to turn on the display, so we do it here.
7758 if (acrtc_state->stream->sink != aconnector->dc_sink)
7759 dm_force_atomic_commit(&aconnector->base);
7763 * Grabs all modesetting locks to serialize against any blocking commits,
7764 * and waits for completion of all non-blocking commits.
7766 static int do_aquire_global_lock(struct drm_device *dev,
7767 struct drm_atomic_state *state)
7769 struct drm_crtc *crtc;
7770 struct drm_crtc_commit *commit;
7774 * Adding all modeset locks to the acquire_ctx will
7775 * ensure that when the framework releases it, the
7776 * extra locks we are taking here will get released too.
7778 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7782 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7783 spin_lock(&crtc->commit_lock);
7784 commit = list_first_entry_or_null(&crtc->commit_list,
7785 struct drm_crtc_commit, commit_entry);
7787 drm_crtc_commit_get(commit);
7788 spin_unlock(&crtc->commit_lock);
7794 * Make sure all pending HW programming has completed and page flips are done.
7797 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7800 ret = wait_for_completion_interruptible_timeout(
7801 &commit->flip_done, 10*HZ);
7804 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7805 "timed out\n", crtc->base.id, crtc->name);
7807 drm_crtc_commit_put(commit);
7810 return ret < 0 ? ret : 0;
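/*
 * Derive the freesync config for a CRTC from the connector's VRR range:
 * VRR is supported only when the mode's vrefresh falls within
 * [min_vfreq, max_vfreq], and the resulting state follows the DRM
 * vrr_enabled property.
 */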
7813 static void get_freesync_config_for_crtc(
7814 struct dm_crtc_state *new_crtc_state,
7815 struct dm_connector_state *new_con_state)
7817 struct mod_freesync_config config = {0};
7818 struct amdgpu_dm_connector *aconnector =
7819 to_amdgpu_dm_connector(new_con_state->base.connector);
7820 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7821 int vrefresh = drm_mode_vrefresh(mode);
7823 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7824 vrefresh >= aconnector->min_vfreq &&
7825 vrefresh <= aconnector->max_vfreq;
7827 if (new_crtc_state->vrr_supported) {
7828 new_crtc_state->stream->ignore_msa_timing_param = true;
7829 config.state = new_crtc_state->base.vrr_enabled ?
7830 VRR_STATE_ACTIVE_VARIABLE :
7832 config.min_refresh_in_uhz =
7833 aconnector->min_vfreq * 1000000;
7834 config.max_refresh_in_uhz =
7835 aconnector->max_vfreq * 1000000;
7836 config.vsif_supported = true;
7840 new_crtc_state->freesync_config = config;
7843 static void reset_freesync_config_for_crtc(
7844 struct dm_crtc_state *new_crtc_state)
7846 new_crtc_state->vrr_supported = false;
7848 memset(&new_crtc_state->vrr_params, 0,
7849 sizeof(new_crtc_state->vrr_params));
7850 memset(&new_crtc_state->vrr_infopacket, 0,
7851 sizeof(new_crtc_state->vrr_infopacket));
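/*
 * Add or remove the DC stream backing a CRTC in the atomic state: on
 * enable, create and validate a new stream against the sink; on disable,
 * remove the old stream from the DC context. Sets
 * *lock_and_validation_needed whenever the global DC state changes.
 */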
7854 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7855 struct drm_atomic_state *state,
7856 struct drm_crtc *crtc,
7857 struct drm_crtc_state *old_crtc_state,
7858 struct drm_crtc_state *new_crtc_state,
7860 bool *lock_and_validation_needed)
7862 struct dm_atomic_state *dm_state = NULL;
7863 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7864 struct dc_stream_state *new_stream;
7868 * TODO: Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set.
7869 * Update changed items.
7871 struct amdgpu_crtc *acrtc = NULL;
7872 struct amdgpu_dm_connector *aconnector = NULL;
7873 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7874 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7878 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7879 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7880 acrtc = to_amdgpu_crtc(crtc);
7881 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7883 /* TODO This hack should go away */
7884 if (aconnector && enable) {
7885 /* Make sure a fake sink is created in the plug-in scenario */
7886 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7888 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7891 if (IS_ERR(drm_new_conn_state)) {
7892 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7896 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7897 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7899 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7902 new_stream = create_validate_stream_for_sink(aconnector,
7903 &new_crtc_state->mode,
7905 dm_old_crtc_state->stream);
7908 * We can have no stream on ACTION_SET if a display
7909 * was disconnected during S3; in this case it is not an
7910 * error. The OS will be updated after detection and
7911 * will do the right thing on the next atomic commit.
7915 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7916 __func__, acrtc->base.base.id);
7921 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7923 ret = fill_hdr_info_packet(drm_new_conn_state,
7924 &new_stream->hdr_static_metadata);
7929 * If we already removed the old stream from the context
7930 * (and set the new stream to NULL) then we can't reuse
7931 * the old stream even if the stream and scaling are unchanged.
7932 * We'll hit the BUG_ON below and get a black screen.
7934 * TODO: Refactor this function to allow this check to work
7935 * in all conditions.
7937 if (dm_new_crtc_state->stream &&
7938 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7939 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7940 new_crtc_state->mode_changed = false;
7941 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7942 new_crtc_state->mode_changed);
7946 /* mode_changed flag may get updated above, need to check again */
7947 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7951 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7952 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7953 "connectors_changed:%d\n",
7955 new_crtc_state->enable,
7956 new_crtc_state->active,
7957 new_crtc_state->planes_changed,
7958 new_crtc_state->mode_changed,
7959 new_crtc_state->active_changed,
7960 new_crtc_state->connectors_changed);
7962 /* Remove stream for any changed/disabled CRTC */
7965 if (!dm_old_crtc_state->stream)
7968 ret = dm_atomic_get_state(state, &dm_state);
7972 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7975 /* i.e. reset mode */
7976 if (dc_remove_stream_from_ctx(
7979 dm_old_crtc_state->stream) != DC_OK) {
7984 dc_stream_release(dm_old_crtc_state->stream);
7985 dm_new_crtc_state->stream = NULL;
7987 reset_freesync_config_for_crtc(dm_new_crtc_state);
7989 *lock_and_validation_needed = true;
7991 } else {/* Add stream for any updated/enabled CRTC */
7993 * Quick fix to prevent a NULL pointer dereference on new_stream when
7994 * newly added MST connectors are not found in the existing crtc_state in chained mode.
7995 * TODO: need to dig out the root cause of this.
7997 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8000 if (modereset_required(new_crtc_state))
8003 if (modeset_required(new_crtc_state, new_stream,
8004 dm_old_crtc_state->stream)) {
8006 WARN_ON(dm_new_crtc_state->stream);
8008 ret = dm_atomic_get_state(state, &dm_state);
8012 dm_new_crtc_state->stream = new_stream;
8014 dc_stream_retain(new_stream);
8016 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8019 if (dc_add_stream_to_ctx(
8022 dm_new_crtc_state->stream) != DC_OK) {
8027 *lock_and_validation_needed = true;
8032 /* Release extra reference */
8034 dc_stream_release(new_stream);
8037 * We want to do dc stream updates that do not require a
8038 * full modeset below.
8040 if (!(enable && aconnector && new_crtc_state->enable &&
8041 new_crtc_state->active))
8044 * Given the above conditions, the dc state cannot be NULL because:
8045 * 1. We're in the process of enabling the CRTC (it has just been added
8046 * to the dc context, or is already on the context),
8047 * 2. it has a valid connector attached, and
8048 * 3. it is currently active and enabled.
8049 * => The dc stream state currently exists.
8051 BUG_ON(dm_new_crtc_state->stream == NULL);
8053 /* Scaling or underscan settings */
8054 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8055 update_stream_scaling_settings(
8056 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8059 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8062 * Color management settings. We also update color properties
8063 * when a modeset is needed, to ensure they get reprogrammed.
8065 if (dm_new_crtc_state->base.color_mgmt_changed ||
8066 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8067 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8072 /* Update Freesync settings. */
8073 get_freesync_config_for_crtc(dm_new_crtc_state,
8080 dc_stream_release(new_stream);
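/*
 * Decide whether a plane update requires removing and recreating the DC
 * plane: modesets, CRTC color management changes, and z-order-affecting
 * plane additions/removals on the same CRTC all force a reset.
 */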
8084 static bool should_reset_plane(struct drm_atomic_state *state,
8085 struct drm_plane *plane,
8086 struct drm_plane_state *old_plane_state,
8087 struct drm_plane_state *new_plane_state)
8089 struct drm_plane *other;
8090 struct drm_plane_state *old_other_state, *new_other_state;
8091 struct drm_crtc_state *new_crtc_state;
8095 * TODO: Remove this hack once the checks below are sufficient
8096 * to determine when we need to reset all the planes on the stream.
8099 if (state->allow_modeset)
8102 /* Exit early if we know that we're adding or removing the plane. */
8103 if (old_plane_state->crtc != new_plane_state->crtc)
8106 /* old crtc == new_crtc == NULL, plane not in context. */
8107 if (!new_plane_state->crtc)
8111 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8113 if (!new_crtc_state)
8116 /* CRTC Degamma changes currently require us to recreate planes. */
8117 if (new_crtc_state->color_mgmt_changed)
8120 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8124 * If there are any new primary or overlay planes being added or
8125 * removed then the z-order can potentially change. To ensure
8126 * correct z-order and pipe acquisition the current DC architecture
8127 * requires us to remove and recreate all existing planes.
8129 * TODO: Come up with a more elegant solution for this.
8131 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8132 if (other->type == DRM_PLANE_TYPE_CURSOR)
8135 if (old_other_state->crtc != new_plane_state->crtc &&
8136 new_other_state->crtc != new_plane_state->crtc)
8139 if (old_other_state->crtc != new_other_state->crtc)
8142 /* TODO: Remove this once we can handle fast format changes. */
8143 if (old_other_state->fb && new_other_state->fb &&
8144 old_other_state->fb->format != new_other_state->fb->format)
8151 static int dm_update_plane_state(struct dc *dc,
8152 struct drm_atomic_state *state,
8153 struct drm_plane *plane,
8154 struct drm_plane_state *old_plane_state,
8155 struct drm_plane_state *new_plane_state,
8157 bool *lock_and_validation_needed)
8160 struct dm_atomic_state *dm_state = NULL;
8161 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8162 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8163 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8164 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8165 struct amdgpu_crtc *new_acrtc;
8170 new_plane_crtc = new_plane_state->crtc;
8171 old_plane_crtc = old_plane_state->crtc;
8172 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8173 dm_old_plane_state = to_dm_plane_state(old_plane_state);
8175 /* TODO: Implement a better atomic check for the cursor plane */
8176 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8177 if (!enable || !new_plane_crtc ||
8178 drm_atomic_plane_disabling(plane->state, new_plane_state))
8181 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8183 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8184 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8185 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8186 new_plane_state->crtc_w, new_plane_state->crtc_h);
8193 needs_reset = should_reset_plane(state, plane, old_plane_state,
8196 /* Remove any changed/removed planes */
8201 if (!old_plane_crtc)
8204 old_crtc_state = drm_atomic_get_old_crtc_state(
8205 state, old_plane_crtc);
8206 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8208 if (!dm_old_crtc_state->stream)
8211 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8212 plane->base.id, old_plane_crtc->base.id);
8214 ret = dm_atomic_get_state(state, &dm_state);
8218 if (!dc_remove_plane_from_context(
8220 dm_old_crtc_state->stream,
8221 dm_old_plane_state->dc_state,
8222 dm_state->context)) {
8229 dc_plane_state_release(dm_old_plane_state->dc_state);
8230 dm_new_plane_state->dc_state = NULL;
8232 *lock_and_validation_needed = true;
8234 } else { /* Add new planes */
8235 struct dc_plane_state *dc_new_plane_state;
8237 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8240 if (!new_plane_crtc)
8243 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8244 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8246 if (!dm_new_crtc_state->stream)
8252 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8256 WARN_ON(dm_new_plane_state->dc_state);
8258 dc_new_plane_state = dc_create_plane_state(dc);
8259 if (!dc_new_plane_state)
8262 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8263 plane->base.id, new_plane_crtc->base.id);
8265 ret = fill_dc_plane_attributes(
8266 new_plane_crtc->dev->dev_private,
8271 dc_plane_state_release(dc_new_plane_state);
8275 ret = dm_atomic_get_state(state, &dm_state);
8277 dc_plane_state_release(dc_new_plane_state);
8282 * Any atomic check errors that occur after this will
8283 * not need a release. The plane state will be attached
8284 * to the stream, and therefore part of the atomic
8285 * state. It'll be released when the atomic state is cleaned.
8288 if (!dc_add_plane_to_context(
8290 dm_new_crtc_state->stream,
8292 dm_state->context)) {
8294 dc_plane_state_release(dc_new_plane_state);
8298 dm_new_plane_state->dc_state = dc_new_plane_state;
8300 /* Tell DC to do a full surface update every time there
8301 * is a plane change. Inefficient, but works for now.
8303 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8305 *lock_and_validation_needed = true;
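/*
 * Classify how heavy this commit is for DC: build dummy surface updates
 * for every plane in the state and let dc_check_update_surfaces_for_stream()
 * grade it FAST/MED/FULL, forcing FULL whenever a stream or a plane's DC
 * state changed identity.
 */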
8313 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8314 struct drm_atomic_state *state,
8315 enum surface_update_type *out_type)
8317 struct dc *dc = dm->dc;
8318 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8319 int i, j, num_plane, ret = 0;
8320 struct drm_plane_state *old_plane_state, *new_plane_state;
8321 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8322 struct drm_crtc *new_plane_crtc;
8323 struct drm_plane *plane;
8325 struct drm_crtc *crtc;
8326 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8327 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8328 struct dc_stream_status *status = NULL;
8329 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8330 struct surface_info_bundle {
8331 struct dc_surface_update surface_updates[MAX_SURFACES];
8332 struct dc_plane_info plane_infos[MAX_SURFACES];
8333 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8334 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8335 struct dc_stream_update stream_update;
8338 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8341 DRM_ERROR("Failed to allocate update bundle\n");
8342 /* Set type to FULL to avoid crashing in DC */
8343 update_type = UPDATE_TYPE_FULL;
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

		memset(bundle, 0, sizeof(struct surface_info_bundle));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
			uint64_t tiling_flags;
			bool tmz_surface = false;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			bundle->surface_updates[num_plane].surface =
					new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
						new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
						new_dm_plane_state->dc_state->in_transfer_func;
				bundle->surface_updates[num_plane].gamut_remap_matrix =
						&new_dm_plane_state->dc_state->gamut_remap_matrix;
				bundle->stream_update.gamut_remap =
						&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
						&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
						new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address, tmz_surface,
					false);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
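
/*
 * On MST links that use DSC, recomputing the DSC configuration for one stream
 * can change the bandwidth available to every other stream sharing the same
 * topology. When @crtc undergoes a modeset, pull all CRTCs driven through the
 * same MST manager into the atomic state so they are revalidated too.
 */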
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another CRTC, acquiring the global lock guarantees that
 * any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
	enum dc_status status;
	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset
	 * or plane update which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;
	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}
#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}
	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);
			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}
	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}
	/* Check scaling and underscan changes. */
	/*
	 * TODO: Scaling-change validation was removed because a new stream
	 * cannot be committed into the context without causing a full reset.
	 * Decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scale or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}
	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to
	 * take the global lock. Leaving it in to check if we broke any corner
	 * cases:
	 *   lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 *   lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of the MST topology in the state:
		 * we need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * of getting stuck in an infinite loop and hanging eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)\n",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 * E.g. with private_objs = [A, DM, B] and
				 * i == 1, B is moved into slot 1 and
				 * num_private_objs drops to 2.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}
	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
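
/*
 * A sink that cannot ignore the MSA video timing parameters is locked to the
 * transmitted timing and cannot seamlessly change refresh rate, so the
 * DP_MSA_TIMING_PAR_IGNORED capability bit is a prerequisite for FreeSync
 * over DP/eDP.
 */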
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
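
/**
 * amdgpu_dm_update_freesync_caps - parse FreeSync capabilities from an EDID
 * @connector: the DRM connector the EDID belongs to
 * @edid: EDID to parse, or NULL to clear any previously cached capabilities
 *
 * For DP/eDP sinks that can ignore MSA timing, scan the EDID's detailed
 * timing descriptors for a monitor range descriptor and cache the supported
 * vertical refresh range. A sink is reported as FreeSync capable when the
 * advertised range spans more than 10 Hz; e.g. an assumed 40-75 Hz continuous
 * range gives min_vfreq = 40 and max_vfreq = 75 and therefore qualifies.
 */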
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-zero, restrict FreeSync to DP and eDP.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
	}
	if (edid_check_required == true && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
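
/*
 * Read the sink's PSR capability from the DP_PSR_SUPPORT DPCD register for
 * eDP links. The first byte holds the PSR version supported by the panel:
 * 0 means no PSR support, anything else is treated as PSR1 here.
 */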
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR; init with a fail-safe of 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
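
	/*
	 * Worked example (assumed mode): a 3840x2160@60 timing with
	 * pix_clk_100hz = 5940000, v_total = 2250 and h_total = 4400 gives
	 * vsync_rate_hz = 594000000 / 2250 / 4400 = 60, so
	 * frame_time_microsec = 16666 and
	 * num_frames_static = 30000 / 16666 + 1 = 2.
	 */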
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}