2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
48 #include "amdgpu_pm.h"
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
58 #include "ivsrcid/ivsrcid_vislands30.h"
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
104 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
107 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
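/* For reference: dm_dmub_hw_init() below sizes the usable DMUB inst_const
 * payload as inst_const_bytes - PSP_HEADER_BYTES - PSP_FOOTER_BYTES, i.e.
 * the PSP wrapper is stripped from both ends of the ucode region.
 */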
119 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
121 * requests into DC requests, and DC responses into DRM responses.
123 * The root control structure is &struct amdgpu_display_manager.
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
130 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
132 switch (link->dpcd_caps.dongle_type) {
133 case DISPLAY_DONGLE_NONE:
134 return DRM_MODE_SUBCONNECTOR_Native;
135 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
136 return DRM_MODE_SUBCONNECTOR_VGA;
137 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
138 case DISPLAY_DONGLE_DP_DVI_DONGLE:
139 return DRM_MODE_SUBCONNECTOR_DVID;
140 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
141 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
142 return DRM_MODE_SUBCONNECTOR_HDMIA;
143 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
144 default:
145 return DRM_MODE_SUBCONNECTOR_Unknown;
149 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
151 struct dc_link *link = aconnector->dc_link;
152 struct drm_connector *connector = &aconnector->base;
153 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
155 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
158 if (aconnector->dc_sink)
159 subconnector = get_subconnector_type(link);
161 drm_object_property_set_value(&connector->base,
162 connector->dev->mode_config.dp_subconnector_property,
167 * initializes drm_device display related structures, based on the information
168 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
169 * drm_encoder, drm_mode_config
171 * Returns 0 on success
173 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
174 /* removes and deallocates the drm structures, created by the above function */
175 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
177 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
178 struct drm_plane *plane,
179 unsigned long possible_crtcs,
180 const struct dc_plane_cap *plane_cap);
181 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
182 struct drm_plane *plane,
183 uint32_t link_index);
184 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
185 struct amdgpu_dm_connector *amdgpu_dm_connector,
187 struct amdgpu_encoder *amdgpu_encoder);
188 static int amdgpu_dm_encoder_init(struct drm_device *dev,
189 struct amdgpu_encoder *aencoder,
190 uint32_t link_index);
192 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
194 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
195 struct drm_atomic_state *state,
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 struct drm_atomic_state *state);
203 static void handle_cursor_update(struct drm_plane *plane,
204 struct drm_plane_state *old_plane_state);
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
213 * dm_vblank_get_counter
216 * Get counter for number of vertical blanks
219 * struct amdgpu_device *adev - [in] desired amdgpu device
220 * int crtc - [in] which CRTC to get the counter from
223 * Counter for vertical blanks
225 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
227 if (crtc >= adev->mode_info.num_crtc)
230 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
231 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
235 if (acrtc_state->stream == NULL) {
236 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241 return dc_stream_get_vblank_counter(acrtc_state->stream);
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 u32 *vbl, u32 *position)
248 uint32_t v_blank_start, v_blank_end, h_position, v_position;
250 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
253 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
254 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
257 if (acrtc_state->stream == NULL) {
258 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
264 * TODO rework base driver to use values directly.
265 * for now parse it back into reg-format
267 dc_stream_get_scanoutpos(acrtc_state->stream,
273 *position = v_position | (h_position << 16);
274 *vbl = v_blank_start | (v_blank_end << 16);
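/* Illustrative unpacking, mirroring the packing above (hypothetical caller
 * code, not from this file):
 *   vpos = *position & 0xffff; hpos = (*position >> 16) & 0xffff;
 *   vbl_start = *vbl & 0xffff;  vbl_end = (*vbl >> 16) & 0xffff;
 */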
280 static bool dm_is_idle(void *handle)
286 static int dm_wait_for_idle(void *handle)
292 static bool dm_check_soft_reset(void *handle)
297 static int dm_soft_reset(void *handle)
303 static struct amdgpu_crtc *
304 get_crtc_by_otg_inst(struct amdgpu_device *adev,
307 struct drm_device *dev = adev->ddev;
308 struct drm_crtc *crtc;
309 struct amdgpu_crtc *amdgpu_crtc;
311 if (otg_inst == -1) {
313 return adev->mode_info.crtcs[0];
316 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
317 amdgpu_crtc = to_amdgpu_crtc(crtc);
319 if (amdgpu_crtc->otg_inst == otg_inst)
326 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
328 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
329 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
333 * dm_pflip_high_irq() - Handle pageflip interrupt
334 * @interrupt_params: ignored
336 * Handles the pageflip interrupt by notifying all interested parties
337 * that the pageflip has been completed.
339 static void dm_pflip_high_irq(void *interrupt_params)
341 struct amdgpu_crtc *amdgpu_crtc;
342 struct common_irq_params *irq_params = interrupt_params;
343 struct amdgpu_device *adev = irq_params->adev;
345 struct drm_pending_vblank_event *e;
346 struct dm_crtc_state *acrtc_state;
347 uint32_t vpos, hpos, v_blank_start, v_blank_end;
350 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
352 /* IRQ could occur when in initial stage */
353 /* TODO work and BO cleanup */
354 if (amdgpu_crtc == NULL) {
355 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
359 spin_lock_irqsave(&adev->ddev->event_lock, flags);
361 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
362 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
363 amdgpu_crtc->pflip_status,
364 AMDGPU_FLIP_SUBMITTED,
365 amdgpu_crtc->crtc_id,
367 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
371 /* page flip completed. */
372 e = amdgpu_crtc->event;
373 amdgpu_crtc->event = NULL;
378 acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
379 vrr_active = amdgpu_dm_vrr_active(acrtc_state);
381 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
383 !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
384 &v_blank_end, &hpos, &vpos) ||
385 (vpos < v_blank_start)) {
386 /* Update to correct count and vblank timestamp if racing with
387 * vblank irq. This also updates to the correct vblank timestamp
388 * even in VRR mode, as scanout is past the front-porch atm.
390 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
392 /* Wake up userspace by sending the pageflip event with proper
393 * count and timestamp of vblank of flip completion.
396 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
398 /* Event sent, so done with vblank for this flip */
399 drm_crtc_vblank_put(&amdgpu_crtc->base);
402 /* VRR active and inside front-porch: vblank count and
403 * timestamp for pageflip event will only be up to date after
404 * drm_crtc_handle_vblank() has been executed from late vblank
405 * irq handler after start of back-porch (vline 0). We queue the
406 * pageflip event for send-out by drm_crtc_handle_vblank() with
407 * updated timestamp and count, once it runs after us.
409 * We need to open-code this instead of using the helper
410 * drm_crtc_arm_vblank_event(), as that helper would
411 * call drm_crtc_accurate_vblank_count(), which we must
412 * not call in VRR mode while we are in front-porch!
415 /* sequence will be replaced by real count during send-out. */
416 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
417 e->pipe = amdgpu_crtc->crtc_id;
419 list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
423 /* Keep track of vblank of this flip for flip throttling. We use the
424 * cooked hw counter, as that one is incremented at start of this vblank
425 * of pageflip completion, so last_flip_vblank is the forbidden count
426 * for queueing new pageflips if vsync + VRR is enabled.
428 amdgpu_crtc->last_flip_vblank =
429 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
431 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
432 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
434 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
435 amdgpu_crtc->crtc_id, amdgpu_crtc,
436 vrr_active, (int) !e);
439 static void dm_vupdate_high_irq(void *interrupt_params)
441 struct common_irq_params *irq_params = interrupt_params;
442 struct amdgpu_device *adev = irq_params->adev;
443 struct amdgpu_crtc *acrtc;
444 struct dm_crtc_state *acrtc_state;
447 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
450 acrtc_state = to_dm_crtc_state(acrtc->base.state);
452 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
454 amdgpu_dm_vrr_active(acrtc_state));
456 /* Core vblank handling is done here after end of front-porch in
457 * vrr mode, as vblank timestamping only gives valid results
458 * after the front-porch has ended. This will also deliver
459 * page-flip completion events that have been queued to us
460 * if a pageflip happened inside front-porch.
462 if (amdgpu_dm_vrr_active(acrtc_state)) {
463 drm_crtc_handle_vblank(&acrtc->base);
465 /* BTR processing for pre-DCE12 ASICs */
466 if (acrtc_state->stream &&
467 adev->family < AMDGPU_FAMILY_AI) {
468 spin_lock_irqsave(&adev->ddev->event_lock, flags);
469 mod_freesync_handle_v_update(
470 adev->dm.freesync_module,
472 &acrtc_state->vrr_params);
474 dc_stream_adjust_vmin_vmax(
477 &acrtc_state->vrr_params.adjust);
478 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
485 * dm_crtc_high_irq() - Handles CRTC interrupt
486 * @interrupt_params: used for determining the CRTC instance
488 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
491 static void dm_crtc_high_irq(void *interrupt_params)
493 struct common_irq_params *irq_params = interrupt_params;
494 struct amdgpu_device *adev = irq_params->adev;
495 struct amdgpu_crtc *acrtc;
496 struct dm_crtc_state *acrtc_state;
499 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
503 acrtc_state = to_dm_crtc_state(acrtc->base.state);
505 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
506 amdgpu_dm_vrr_active(acrtc_state),
507 acrtc_state->active_planes);
510 * Core vblank handling at start of front-porch is only possible
511 * in non-vrr mode, as only there does vblank timestamping give
512 * valid results while taken in front-porch. Otherwise defer it
513 * to dm_vupdate_high_irq after end of front-porch.
515 if (!amdgpu_dm_vrr_active(acrtc_state))
516 drm_crtc_handle_vblank(&acrtc->base);
519 * The following must happen at start of vblank, for crc
520 * computation and below-the-range (btr) support in vrr mode.
522 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
524 /* BTR updates need to happen before VUPDATE on Vega and above. */
525 if (adev->family < AMDGPU_FAMILY_AI)
528 spin_lock_irqsave(&adev->ddev->event_lock, flags);
530 if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
531 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
532 mod_freesync_handle_v_update(adev->dm.freesync_module,
534 &acrtc_state->vrr_params);
536 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
537 &acrtc_state->vrr_params.adjust);
541 * If there aren't any active_planes then DCH HUBP may be clock-gated.
542 * In that case, pageflip completion interrupts won't fire and pageflip
543 * completion events won't get delivered. Prevent this by sending
544 * pending pageflip events from here if a flip is still pending.
546 * If any planes are enabled, use dm_pflip_high_irq() instead, to
547 * avoid race conditions between flip programming and completion,
548 * which could cause too early flip completion events.
550 if (adev->family >= AMDGPU_FAMILY_RV &&
551 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
552 acrtc_state->active_planes == 0) {
554 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
556 drm_crtc_vblank_put(&acrtc->base);
558 acrtc->pflip_status = AMDGPU_FLIP_NONE;
561 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
564 static int dm_set_clockgating_state(void *handle,
565 enum amd_clockgating_state state)
570 static int dm_set_powergating_state(void *handle,
571 enum amd_powergating_state state)
576 /* Prototypes of private functions */
577 static int dm_early_init(void* handle);
579 /* Allocate memory for FBC compressed data */
580 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
582 struct drm_device *dev = connector->dev;
583 struct amdgpu_device *adev = dev->dev_private;
584 struct dm_comressor_info *compressor = &adev->dm.compressor;
585 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
586 struct drm_display_mode *mode;
587 unsigned long max_size = 0;
589 if (adev->dm.dc->fbc_compressor == NULL)
592 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
595 if (compressor->bo_ptr)
599 list_for_each_entry(mode, &connector->modes, head) {
600 if (max_size < mode->htotal * mode->vtotal)
601 max_size = mode->htotal * mode->vtotal;
605 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
606 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
607 &compressor->gpu_addr, &compressor->cpu_addr);
610 DRM_ERROR("DM: Failed to initialize FBC\n");
612 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
613 DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
620 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
621 int pipe, bool *enabled,
622 unsigned char *buf, int max_bytes)
624 struct drm_device *dev = dev_get_drvdata(kdev);
625 struct amdgpu_device *adev = dev->dev_private;
626 struct drm_connector *connector;
627 struct drm_connector_list_iter conn_iter;
628 struct amdgpu_dm_connector *aconnector;
633 mutex_lock(&adev->dm.audio_lock);
635 drm_connector_list_iter_begin(dev, &conn_iter);
636 drm_for_each_connector_iter(connector, &conn_iter) {
637 aconnector = to_amdgpu_dm_connector(connector);
638 if (aconnector->audio_inst != port)
642 ret = drm_eld_size(connector->eld);
643 memcpy(buf, connector->eld, min(max_bytes, ret));
647 drm_connector_list_iter_end(&conn_iter);
649 mutex_unlock(&adev->dm.audio_lock);
651 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
656 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
657 .get_eld = amdgpu_dm_audio_component_get_eld,
660 static int amdgpu_dm_audio_component_bind(struct device *kdev,
661 struct device *hda_kdev, void *data)
663 struct drm_device *dev = dev_get_drvdata(kdev);
664 struct amdgpu_device *adev = dev->dev_private;
665 struct drm_audio_component *acomp = data;
667 acomp->ops = &amdgpu_dm_audio_component_ops;
669 adev->dm.audio_component = acomp;
674 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
675 struct device *hda_kdev, void *data)
677 struct drm_device *dev = dev_get_drvdata(kdev);
678 struct amdgpu_device *adev = dev->dev_private;
679 struct drm_audio_component *acomp = data;
683 adev->dm.audio_component = NULL;
686 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
687 .bind = amdgpu_dm_audio_component_bind,
688 .unbind = amdgpu_dm_audio_component_unbind,
691 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
698 adev->mode_info.audio.enabled = true;
700 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
702 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
703 adev->mode_info.audio.pin[i].channels = -1;
704 adev->mode_info.audio.pin[i].rate = -1;
705 adev->mode_info.audio.pin[i].bits_per_sample = -1;
706 adev->mode_info.audio.pin[i].status_bits = 0;
707 adev->mode_info.audio.pin[i].category_code = 0;
708 adev->mode_info.audio.pin[i].connected = false;
709 adev->mode_info.audio.pin[i].id =
710 adev->dm.dc->res_pool->audios[i]->inst;
711 adev->mode_info.audio.pin[i].offset = 0;
714 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
718 adev->dm.audio_registered = true;
723 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
728 if (!adev->mode_info.audio.enabled)
731 if (adev->dm.audio_registered) {
732 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
733 adev->dm.audio_registered = false;
736 /* TODO: Disable audio? */
738 adev->mode_info.audio.enabled = false;
741 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
743 struct drm_audio_component *acomp = adev->dm.audio_component;
745 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
746 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
748 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
753 static int dm_dmub_hw_init(struct amdgpu_device *adev)
755 const struct dmcub_firmware_header_v1_0 *hdr;
756 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
757 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
758 const struct firmware *dmub_fw = adev->dm.dmub_fw;
759 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
760 struct abm *abm = adev->dm.dc->res_pool->abm;
761 struct dmub_srv_hw_params hw_params;
762 enum dmub_status status;
763 const unsigned char *fw_inst_const, *fw_bss_data;
764 uint32_t i, fw_inst_const_size, fw_bss_data_size;
768 /* DMUB isn't supported on the ASIC. */
772 DRM_ERROR("No framebuffer info for DMUB service.\n");
777 /* Firmware required for DMUB support. */
778 DRM_ERROR("No firmware provided for DMUB.\n");
782 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
783 if (status != DMUB_STATUS_OK) {
784 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
788 if (!has_hw_support) {
789 DRM_INFO("DMUB unsupported on ASIC\n");
793 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
795 fw_inst_const = dmub_fw->data +
796 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
799 fw_bss_data = dmub_fw->data +
800 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
801 le32_to_cpu(hdr->inst_const_bytes);
803 /* Copy firmware and bios info into FB memory. */
804 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
805 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
807 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
809 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
810 * amdgpu_ucode_init_single_fw will load dmub firmware
811 * fw_inst_const part to cw0; otherwise, the firmware back door load
812 * will be done by dm_dmub_hw_init
814 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
815 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
819 if (fw_bss_data_size)
820 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
821 fw_bss_data, fw_bss_data_size);
823 /* Copy firmware bios info into FB memory. */
824 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
827 /* Reset regions that need to be reset. */
828 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
829 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
831 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
832 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
834 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
835 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
837 /* Initialize hardware. */
838 memset(&hw_params, 0, sizeof(hw_params));
839 hw_params.fb_base = adev->gmc.fb_start;
840 hw_params.fb_offset = adev->gmc.aper_base;
842 /* backdoor load firmware and trigger dmub running */
843 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
844 hw_params.load_inst_const = true;
847 hw_params.psp_version = dmcu->psp_version;
849 for (i = 0; i < fb_info->num_fb; ++i)
850 hw_params.fb[i] = &fb_info->fb[i];
852 status = dmub_srv_hw_init(dmub_srv, &hw_params);
853 if (status != DMUB_STATUS_OK) {
854 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
858 /* Wait for firmware load to finish. */
859 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
860 if (status != DMUB_STATUS_OK)
861 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
863 /* Init DMCU and ABM if available. */
865 dmcu->funcs->dmcu_init(dmcu);
866 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
869 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
870 if (!adev->dm.dc->ctx->dmub_srv) {
871 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
875 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
876 adev->dm.dmcub_fw_version);
881 static int amdgpu_dm_init(struct amdgpu_device *adev)
883 struct dc_init_data init_data;
884 #ifdef CONFIG_DRM_AMD_DC_HDCP
885 struct dc_callback_init init_params;
889 adev->dm.ddev = adev->ddev;
890 adev->dm.adev = adev;
892 /* Zero all the fields */
893 memset(&init_data, 0, sizeof(init_data));
894 #ifdef CONFIG_DRM_AMD_DC_HDCP
895 memset(&init_params, 0, sizeof(init_params));
898 mutex_init(&adev->dm.dc_lock);
899 mutex_init(&adev->dm.audio_lock);
901 if (amdgpu_dm_irq_init(adev)) {
902 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
906 init_data.asic_id.chip_family = adev->family;
908 init_data.asic_id.pci_revision_id = adev->pdev->revision;
909 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
911 init_data.asic_id.vram_width = adev->gmc.vram_width;
912 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
913 init_data.asic_id.atombios_base_address =
914 adev->mode_info.atom_context->bios;
916 init_data.driver = adev;
918 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
920 if (!adev->dm.cgs_device) {
921 DRM_ERROR("amdgpu: failed to create cgs device.\n");
925 init_data.cgs_device = adev->dm.cgs_device;
927 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
929 switch (adev->asic_type) {
934 init_data.flags.gpu_vm_support = true;
940 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
941 init_data.flags.fbc_support = true;
943 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
944 init_data.flags.multi_mon_pp_mclk_switch = true;
946 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
947 init_data.flags.disable_fractional_pwm = true;
949 init_data.flags.power_down_display_on_boot = true;
951 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
953 /* Display Core create. */
954 adev->dm.dc = dc_create(&init_data);
957 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
959 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
963 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
964 adev->dm.dc->debug.force_single_disp_pipe_split = false;
965 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
968 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
969 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
971 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
972 adev->dm.dc->debug.disable_stutter = true;
974 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
975 adev->dm.dc->debug.disable_dsc = true;
977 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
978 adev->dm.dc->debug.disable_clock_gate = true;
980 r = dm_dmub_hw_init(adev);
982 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
986 dc_hardware_init(adev->dm.dc);
988 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
989 if (!adev->dm.freesync_module) {
991 "amdgpu: failed to initialize freesync_module.\n");
993 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
994 adev->dm.freesync_module);
996 amdgpu_dm_init_color_mod();
998 #ifdef CONFIG_DRM_AMD_DC_HDCP
999 if (adev->asic_type >= CHIP_RAVEN) {
1000 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1002 if (!adev->dm.hdcp_workqueue)
1003 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1005 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1007 dc_init_callbacks(adev->dm.dc, &init_params);
1010 if (amdgpu_dm_initialize_drm_device(adev)) {
1012 "amdgpu: failed to initialize sw for display support.\n");
1016 /* Update the actual number of crtcs in use */
1017 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1019 /* create fake encoders for MST */
1020 dm_dp_create_fake_mst_encoders(adev);
1022 /* TODO: Add_display_info? */
1024 /* TODO use dynamic cursor width */
1025 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1026 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1028 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
1030 "amdgpu: failed to initialize sw for display support.\n");
1034 DRM_DEBUG_DRIVER("KMS initialized.\n");
1038 amdgpu_dm_fini(adev);
1043 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1047 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1048 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1051 amdgpu_dm_audio_fini(adev);
1053 amdgpu_dm_destroy_drm_device(&adev->dm);
1055 #ifdef CONFIG_DRM_AMD_DC_HDCP
1056 if (adev->dm.hdcp_workqueue) {
1057 hdcp_destroy(adev->dm.hdcp_workqueue);
1058 adev->dm.hdcp_workqueue = NULL;
1062 dc_deinit_callbacks(adev->dm.dc);
1064 if (adev->dm.dc->ctx->dmub_srv) {
1065 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1066 adev->dm.dc->ctx->dmub_srv = NULL;
1069 if (adev->dm.dmub_bo)
1070 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1071 &adev->dm.dmub_bo_gpu_addr,
1072 &adev->dm.dmub_bo_cpu_addr);
1074 /* DC Destroy TODO: Replace destroy DAL */
1076 dc_destroy(&adev->dm.dc);
1078 * TODO: pageflip, vblank interrupt
1080 * amdgpu_dm_irq_fini(adev);
1083 if (adev->dm.cgs_device) {
1084 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1085 adev->dm.cgs_device = NULL;
1087 if (adev->dm.freesync_module) {
1088 mod_freesync_destroy(adev->dm.freesync_module);
1089 adev->dm.freesync_module = NULL;
1092 mutex_destroy(&adev->dm.audio_lock);
1093 mutex_destroy(&adev->dm.dc_lock);
1098 static int load_dmcu_fw(struct amdgpu_device *adev)
1100 const char *fw_name_dmcu = NULL;
1102 const struct dmcu_firmware_header_v1_0 *hdr;
1104 switch (adev->asic_type) {
1114 case CHIP_POLARIS11:
1115 case CHIP_POLARIS10:
1116 case CHIP_POLARIS12:
1124 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1125 case CHIP_SIENNA_CICHLID:
1126 case CHIP_NAVY_FLOUNDER:
1130 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1133 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1134 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1135 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1136 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1141 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1145 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1146 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1150 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1152 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1153 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1154 adev->dm.fw_dmcu = NULL;
1158 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1163 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1165 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1167 release_firmware(adev->dm.fw_dmcu);
1168 adev->dm.fw_dmcu = NULL;
1172 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1173 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1174 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1175 adev->firmware.fw_size +=
1176 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1178 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1179 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1180 adev->firmware.fw_size +=
1181 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1183 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1185 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1190 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1192 struct amdgpu_device *adev = ctx;
1194 return dm_read_reg(adev->dm.dc->ctx, address);
1197 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1200 struct amdgpu_device *adev = ctx;
1202 return dm_write_reg(adev->dm.dc->ctx, address, value);
1205 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1207 struct dmub_srv_create_params create_params;
1208 struct dmub_srv_region_params region_params;
1209 struct dmub_srv_region_info region_info;
1210 struct dmub_srv_fb_params fb_params;
1211 struct dmub_srv_fb_info *fb_info;
1212 struct dmub_srv *dmub_srv;
1213 const struct dmcub_firmware_header_v1_0 *hdr;
1214 const char *fw_name_dmub;
1215 enum dmub_asic dmub_asic;
1216 enum dmub_status status;
1219 switch (adev->asic_type) {
1221 dmub_asic = DMUB_ASIC_DCN21;
1222 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1224 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1225 case CHIP_SIENNA_CICHLID:
1226 dmub_asic = DMUB_ASIC_DCN30;
1227 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1229 case CHIP_NAVY_FLOUNDER:
1230 dmub_asic = DMUB_ASIC_DCN30;
1231 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1236 /* ASIC doesn't support DMUB. */
1240 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1242 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1246 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1248 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1252 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1254 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1255 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1256 AMDGPU_UCODE_ID_DMCUB;
1257 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1259 adev->firmware.fw_size +=
1260 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1262 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1263 adev->dm.dmcub_fw_version);
1266 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1268 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1269 dmub_srv = adev->dm.dmub_srv;
1272 DRM_ERROR("Failed to allocate DMUB service!\n");
1276 memset(&create_params, 0, sizeof(create_params));
1277 create_params.user_ctx = adev;
1278 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1279 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1280 create_params.asic = dmub_asic;
1282 /* Create the DMUB service. */
1283 status = dmub_srv_create(dmub_srv, &create_params);
1284 if (status != DMUB_STATUS_OK) {
1285 DRM_ERROR("Error creating DMUB service: %d\n", status);
1289 /* Calculate the size of all the regions for the DMUB service. */
1290 memset(®ion_params, 0, sizeof(region_params));
1292 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1293 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1294 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1295 region_params.vbios_size = adev->bios_size;
1296 region_params.fw_bss_data = region_params.bss_data_size ?
1297 adev->dm.dmub_fw->data +
1298 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1299 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1300 region_params.fw_inst_const =
1301 adev->dm.dmub_fw->data +
1302 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1305 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
1308 if (status != DMUB_STATUS_OK) {
1309 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1314 * Allocate a framebuffer based on the total size of all the regions.
1315 * TODO: Move this into GART.
1317 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1318 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1319 &adev->dm.dmub_bo_gpu_addr,
1320 &adev->dm.dmub_bo_cpu_addr);
1324 /* Rebase the regions on the framebuffer address. */
1325 memset(&fb_params, 0, sizeof(fb_params));
1326 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1327 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1328 fb_params.region_info = ®ion_info;
1330 adev->dm.dmub_fb_info =
1331 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1332 fb_info = adev->dm.dmub_fb_info;
1336 "Failed to allocate framebuffer info for DMUB service!\n");
1340 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1341 if (status != DMUB_STATUS_OK) {
1342 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1349 static int dm_sw_init(void *handle)
1351 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1354 r = dm_dmub_sw_init(adev);
1358 return load_dmcu_fw(adev);
1361 static int dm_sw_fini(void *handle)
1363 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1365 kfree(adev->dm.dmub_fb_info);
1366 adev->dm.dmub_fb_info = NULL;
1368 if (adev->dm.dmub_srv) {
1369 dmub_srv_destroy(adev->dm.dmub_srv);
1370 adev->dm.dmub_srv = NULL;
1373 release_firmware(adev->dm.dmub_fw);
1374 adev->dm.dmub_fw = NULL;
1376 release_firmware(adev->dm.fw_dmcu);
1377 adev->dm.fw_dmcu = NULL;
1382 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1384 struct amdgpu_dm_connector *aconnector;
1385 struct drm_connector *connector;
1386 struct drm_connector_list_iter iter;
1389 drm_connector_list_iter_begin(dev, &iter);
1390 drm_for_each_connector_iter(connector, &iter) {
1391 aconnector = to_amdgpu_dm_connector(connector);
1392 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1393 aconnector->mst_mgr.aux) {
1394 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1396 aconnector->base.base.id);
1398 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1400 DRM_ERROR("DM_MST: Failed to start MST\n");
1401 aconnector->dc_link->type =
1402 dc_connection_single;
1407 drm_connector_list_iter_end(&iter);
1412 static int dm_late_init(void *handle)
1414 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1416 struct dmcu_iram_parameters params;
1417 unsigned int linear_lut[16];
1419 struct dmcu *dmcu = NULL;
1422 if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
1423 return detect_mst_link_for_all_connectors(adev->ddev);
1425 dmcu = adev->dm.dc->res_pool->dmcu;
1427 for (i = 0; i < 16; i++)
1428 linear_lut[i] = 0xFFFF * i / 15;
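/* This produces an evenly spaced ramp, i.e.
 * linear_lut[] = { 0x0000, 0x1111, 0x2222, ..., 0xFFFF }. */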
1431 params.backlight_ramping_start = 0xCCCC;
1432 params.backlight_ramping_reduction = 0xCCCCCCCC;
1433 params.backlight_lut_array_size = 16;
1434 params.backlight_lut_array = linear_lut;
1436 /* Min backlight level after ABM reduction; don't allow below 1%:
1437 * 0xFFFF x 0.01 = 0x28F
1439 params.min_abm_backlight = 0x28F;
1441 /* In the case where abm is implemented on dmcub,
1442 * the dmcu object will be NULL.
1443 * ABM 2.4 and up are implemented on dmcub.
1446 ret = dmcu_load_iram(dmcu, params);
1447 else if (adev->dm.dc->ctx->dmub_srv)
1448 ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
1453 return detect_mst_link_for_all_connectors(adev->ddev);
1456 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1458 struct amdgpu_dm_connector *aconnector;
1459 struct drm_connector *connector;
1460 struct drm_connector_list_iter iter;
1461 struct drm_dp_mst_topology_mgr *mgr;
1463 bool need_hotplug = false;
1465 drm_connector_list_iter_begin(dev, &iter);
1466 drm_for_each_connector_iter(connector, &iter) {
1467 aconnector = to_amdgpu_dm_connector(connector);
1468 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1469 aconnector->mst_port)
1472 mgr = &aconnector->mst_mgr;
1475 drm_dp_mst_topology_mgr_suspend(mgr);
1477 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1479 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1480 need_hotplug = true;
1484 drm_connector_list_iter_end(&iter);
1487 drm_kms_helper_hotplug_event(dev);
1490 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1492 struct smu_context *smu = &adev->smu;
1495 if (!is_support_sw_smu(adev))
1498 /* This interface is for dGPU Navi1x. Linux dc-pplib interface depends
1499 * on the Windows driver dc implementation.
1500 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1501 * should be passed to smu during boot up and resume from s3.
1502 * boot up: dc calculate dcn watermark clock settings within dc_create,
1503 * dcn20_resource_construct
1504 * then call pplib functions below to pass the settings to smu:
1505 * smu_set_watermarks_for_clock_ranges
1506 * smu_set_watermarks_table
1507 * navi10_set_watermarks_table
1508 * smu_write_watermarks_table
1510 * For Renoir, clock settings of dcn watermark are also fixed values.
1511 * dc has implemented a different flow for the Windows driver:
1512 * dc_hardware_init / dc_set_power_state
1517 * smu_set_watermarks_for_clock_ranges
1518 * renoir_set_watermarks_table
1519 * smu_write_watermarks_table
1522 * dc_hardware_init -> amdgpu_dm_init
1523 * dc_set_power_state --> dm_resume
1525 * therefore, this function applies to navi10/12/14 but not to Renoir
1528 switch (adev->asic_type) {
1537 ret = smu_write_watermarks_table(smu);
1539 DRM_ERROR("Failed to update WMTABLE!\n");
1547 * dm_hw_init() - Initialize DC device
1548 * @handle: The base driver device containing the amdgpu_dm device.
1550 * Initialize the &struct amdgpu_display_manager device. This involves calling
1551 * the initializers of each DM component, then populating the struct with them.
1553 * Although the function implies hardware initialization, both hardware and
1554 * software are initialized here. Splitting them out to their relevant init
1555 * hooks is a future TODO item.
1557 * Some notable things that are initialized here:
1559 * - Display Core, both software and hardware
1560 * - DC modules that we need (freesync and color management)
1561 * - DRM software states
1562 * - Interrupt sources and handlers
1564 * - Debug FS entries, if enabled
1566 static int dm_hw_init(void *handle)
1568 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1569 /* Create DAL display manager */
1570 amdgpu_dm_init(adev);
1571 amdgpu_dm_hpd_init(adev);
1577 * dm_hw_fini() - Teardown DC device
1578 * @handle: The base driver device containing the amdgpu_dm device.
1580 * Teardown components within &struct amdgpu_display_manager that require
1581 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1582 * were loaded. Also flush IRQ workqueues and disable them.
1584 static int dm_hw_fini(void *handle)
1586 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1588 amdgpu_dm_hpd_fini(adev);
1590 amdgpu_dm_irq_fini(adev);
1591 amdgpu_dm_fini(adev);
1596 static int dm_enable_vblank(struct drm_crtc *crtc);
1597 static void dm_disable_vblank(struct drm_crtc *crtc);
1599 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1600 struct dc_state *state, bool enable)
1602 enum dc_irq_source irq_source;
1603 struct amdgpu_crtc *acrtc;
1607 for (i = 0; i < state->stream_count; i++) {
1608 acrtc = get_crtc_by_otg_inst(
1609 adev, state->stream_status[i].primary_otg_inst);
1611 if (acrtc && state->stream_status[i].plane_count != 0) {
1612 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1613 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1614 DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1615 acrtc->crtc_id, enable ? "en" : "dis", rc);
1617 DRM_WARN("Failed to %s pflip interrupts\n",
1618 enable ? "enable" : "disable");
1621 rc = dm_enable_vblank(&acrtc->base);
1623 DRM_WARN("Failed to enable vblank interrupts\n");
1625 dm_disable_vblank(&acrtc->base);
1633 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1635 struct dc_state *context = NULL;
1636 enum dc_status res = DC_ERROR_UNEXPECTED;
1638 struct dc_stream_state *del_streams[MAX_PIPES];
1639 int del_streams_count = 0;
1641 memset(del_streams, 0, sizeof(del_streams));
1643 context = dc_create_state(dc);
1644 if (context == NULL)
1645 goto context_alloc_fail;
1647 dc_resource_state_copy_construct_current(dc, context);
1649 /* First remove from context all streams */
1650 for (i = 0; i < context->stream_count; i++) {
1651 struct dc_stream_state *stream = context->streams[i];
1653 del_streams[del_streams_count++] = stream;
1656 /* Remove all planes for removed streams and then remove the streams */
1657 for (i = 0; i < del_streams_count; i++) {
1658 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1659 res = DC_FAIL_DETACH_SURFACES;
1663 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1669 res = dc_validate_global_state(dc, context, false);
1672 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1676 res = dc_commit_state(dc, context);
1679 dc_release_state(context);
1685 static int dm_suspend(void *handle)
1687 struct amdgpu_device *adev = handle;
1688 struct amdgpu_display_manager *dm = &adev->dm;
1691 if (adev->in_gpu_reset) {
1692 mutex_lock(&dm->dc_lock);
1693 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1695 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1697 amdgpu_dm_commit_zero_streams(dm->dc);
1699 amdgpu_dm_irq_suspend(adev);
1704 WARN_ON(adev->dm.cached_state);
1705 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1707 s3_handle_mst(adev->ddev, true);
1709 amdgpu_dm_irq_suspend(adev);
1712 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1717 static struct amdgpu_dm_connector *
1718 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1719 struct drm_crtc *crtc)
1722 struct drm_connector_state *new_con_state;
1723 struct drm_connector *connector;
1724 struct drm_crtc *crtc_from_state;
1726 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1727 crtc_from_state = new_con_state->crtc;
1729 if (crtc_from_state == crtc)
1730 return to_amdgpu_dm_connector(connector);
1736 static void emulated_link_detect(struct dc_link *link)
1738 struct dc_sink_init_data sink_init_data = { 0 };
1739 struct display_sink_capability sink_caps = { 0 };
1740 enum dc_edid_status edid_status;
1741 struct dc_context *dc_ctx = link->ctx;
1742 struct dc_sink *sink = NULL;
1743 struct dc_sink *prev_sink = NULL;
1745 link->type = dc_connection_none;
1746 prev_sink = link->local_sink;
1748 if (prev_sink != NULL)
1749 dc_sink_retain(prev_sink);
1751 switch (link->connector_signal) {
1752 case SIGNAL_TYPE_HDMI_TYPE_A: {
1753 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1754 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1758 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1759 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1760 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1764 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1765 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1766 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1770 case SIGNAL_TYPE_LVDS: {
1771 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1772 sink_caps.signal = SIGNAL_TYPE_LVDS;
1776 case SIGNAL_TYPE_EDP: {
1777 sink_caps.transaction_type =
1778 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1779 sink_caps.signal = SIGNAL_TYPE_EDP;
1783 case SIGNAL_TYPE_DISPLAY_PORT: {
1784 sink_caps.transaction_type =
1785 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1786 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1791 DC_ERROR("Invalid connector type! signal:%d\n",
1792 link->connector_signal);
1796 sink_init_data.link = link;
1797 sink_init_data.sink_signal = sink_caps.signal;
1799 sink = dc_sink_create(&sink_init_data);
1801 DC_ERROR("Failed to create sink!\n");
1805 /* dc_sink_create returns a new reference */
1806 link->local_sink = sink;
1808 edid_status = dm_helpers_read_local_edid(
1813 if (edid_status != EDID_OK)
1814 DC_ERROR("Failed to read EDID");
1818 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1819 struct amdgpu_display_manager *dm)
1822 struct dc_surface_update surface_updates[MAX_SURFACES];
1823 struct dc_plane_info plane_infos[MAX_SURFACES];
1824 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1825 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1826 struct dc_stream_update stream_update;
1830 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1833 dm_error("Failed to allocate update bundle\n");
1837 for (k = 0; k < dc_state->stream_count; k++) {
1838 bundle->stream_update.stream = dc_state->streams[k];
1840 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1841 bundle->surface_updates[m].surface =
1842 dc_state->stream_status->plane_states[m];
1843 bundle->surface_updates[m].surface->force_full_update =
1846 dc_commit_updates_for_stream(
1847 dm->dc, bundle->surface_updates,
1848 dc_state->stream_status->plane_count,
1849 dc_state->streams[k], &bundle->stream_update, dc_state);
1858 static int dm_resume(void *handle)
1860 struct amdgpu_device *adev = handle;
1861 struct drm_device *ddev = adev->ddev;
1862 struct amdgpu_display_manager *dm = &adev->dm;
1863 struct amdgpu_dm_connector *aconnector;
1864 struct drm_connector *connector;
1865 struct drm_connector_list_iter iter;
1866 struct drm_crtc *crtc;
1867 struct drm_crtc_state *new_crtc_state;
1868 struct dm_crtc_state *dm_new_crtc_state;
1869 struct drm_plane *plane;
1870 struct drm_plane_state *new_plane_state;
1871 struct dm_plane_state *dm_new_plane_state;
1872 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1873 enum dc_connection_type new_connection_type = dc_connection_none;
1874 struct dc_state *dc_state;
1877 if (adev->in_gpu_reset) {
1878 dc_state = dm->cached_dc_state;
1880 r = dm_dmub_hw_init(adev);
1882 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1884 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1887 amdgpu_dm_irq_resume_early(adev);
1889 for (i = 0; i < dc_state->stream_count; i++) {
1890 dc_state->streams[i]->mode_changed = true;
1891 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1892 dc_state->stream_status->plane_states[j]->update_flags.raw
1897 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1899 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1901 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1903 dc_release_state(dm->cached_dc_state);
1904 dm->cached_dc_state = NULL;
1906 amdgpu_dm_irq_resume_late(adev);
1908 mutex_unlock(&dm->dc_lock);
1912 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1913 dc_release_state(dm_state->context);
1914 dm_state->context = dc_create_state(dm->dc);
1915 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1916 dc_resource_state_construct(dm->dc, dm_state->context);
1918 /* Before powering on DC we need to re-initialize DMUB. */
1919 r = dm_dmub_hw_init(adev);
1921 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1923 /* power on hardware */
1924 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1926 /* program HPD filter */
1930 * early enable HPD Rx IRQ, should be done before set mode as short
1931 * pulse interrupts are used for MST
1933 amdgpu_dm_irq_resume_early(adev);
1935 /* On resume we need to rewrite the MSTM control bits to enable MST */
1936 s3_handle_mst(ddev, false);
1939 drm_connector_list_iter_begin(ddev, &iter);
1940 drm_for_each_connector_iter(connector, &iter) {
1941 aconnector = to_amdgpu_dm_connector(connector);
1944 * this is the case when traversing through already created
1945 * MST connectors; they should be skipped
1947 if (aconnector->mst_port)
1950 mutex_lock(&aconnector->hpd_lock);
1951 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1952 DRM_ERROR("KMS: Failed to detect connector\n");
1954 if (aconnector->base.force && new_connection_type == dc_connection_none)
1955 emulated_link_detect(aconnector->dc_link);
1957 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1959 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1960 aconnector->fake_enable = false;
1962 if (aconnector->dc_sink)
1963 dc_sink_release(aconnector->dc_sink);
1964 aconnector->dc_sink = NULL;
1965 amdgpu_dm_update_connector_after_detect(aconnector);
1966 mutex_unlock(&aconnector->hpd_lock);
1968 drm_connector_list_iter_end(&iter);
1970 /* Force mode set in atomic commit */
1971 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1972 new_crtc_state->active_changed = true;
1975 * atomic_check is expected to create the dc states. We need to release
1976 * them here, since they were duplicated as part of the suspend
1977 * procedure.
1978 */
1979 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1980 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1981 if (dm_new_crtc_state->stream) {
1982 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1983 dc_stream_release(dm_new_crtc_state->stream);
1984 dm_new_crtc_state->stream = NULL;
1988 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1989 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1990 if (dm_new_plane_state->dc_state) {
1991 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1992 dc_plane_state_release(dm_new_plane_state->dc_state);
1993 dm_new_plane_state->dc_state = NULL;
1997 drm_atomic_helper_resume(ddev, dm->cached_state);
1999 dm->cached_state = NULL;
2001 amdgpu_dm_irq_resume_late(adev);
2003 amdgpu_dm_smu_write_watermarks_table(adev);
2011 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2012 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2013 * the base driver's device list to be initialized and torn down accordingly.
2015 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2018 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2020 .early_init = dm_early_init,
2021 .late_init = dm_late_init,
2022 .sw_init = dm_sw_init,
2023 .sw_fini = dm_sw_fini,
2024 .hw_init = dm_hw_init,
2025 .hw_fini = dm_hw_fini,
2026 .suspend = dm_suspend,
2027 .resume = dm_resume,
2028 .is_idle = dm_is_idle,
2029 .wait_for_idle = dm_wait_for_idle,
2030 .check_soft_reset = dm_check_soft_reset,
2031 .soft_reset = dm_soft_reset,
2032 .set_clockgating_state = dm_set_clockgating_state,
2033 .set_powergating_state = dm_set_powergating_state,
2036 const struct amdgpu_ip_block_version dm_ip_block =
2038 .type = AMD_IP_BLOCK_TYPE_DCE,
2042 .funcs = &amdgpu_dm_funcs,
2052 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2053 .fb_create = amdgpu_display_user_framebuffer_create,
2054 .output_poll_changed = drm_fb_helper_output_poll_changed,
2055 .atomic_check = amdgpu_dm_atomic_check,
2056 .atomic_commit = amdgpu_dm_atomic_commit,
2059 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2060 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2063 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2065 u32 max_cll, min_cll, max, min, q, r;
2066 struct amdgpu_dm_backlight_caps *caps;
2067 struct amdgpu_display_manager *dm;
2068 struct drm_connector *conn_base;
2069 struct amdgpu_device *adev;
2070 struct dc_link *link = NULL;
2071 static const u8 pre_computed_values[] = {
2072 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2073 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2075 if (!aconnector || !aconnector->dc_link)
2078 link = aconnector->dc_link;
2079 if (link->connector_signal != SIGNAL_TYPE_EDP)
2082 conn_base = &aconnector->base;
2083 adev = conn_base->dev->dev_private;
2085 caps = &dm->backlight_caps;
2086 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2087 caps->aux_support = false;
2088 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2089 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2091 if (caps->ext_caps->bits.oled == 1 ||
2092 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2093 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2094 caps->aux_support = true;
2096 /* From the specification (CTA-861-G), for calculating the maximum
2097 * luminance we need to use:
2098 * Luminance = 50*2**(CV/32)
2099 * Where CV is a one-byte value.
2100 * Evaluating this expression directly would require floating-point
2101 * precision; to avoid this complexity level, we take advantage that CV is
2102 * divided by a constant. From Euclid's division algorithm, we know that CV
2103 * can be written as: CV = 32*q + r. Next, we replace CV in the
2104 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2105 * need to pre-compute the value of r/32. For pre-computing the values
2106 * we just used the following Ruby line:
2107 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2108 * The results of the above expressions can be verified at
2109 * pre_computed_values.
2111 q = max_cll >> 5;
2112 r = max_cll % 32;
2113 max = (1 << q) * pre_computed_values[r];
2115 // min luminance: maxLum * (CV/255)^2 / 100
2116 q = DIV_ROUND_CLOSEST(min_cll, 255);
2117 min = max * DIV_ROUND_CLOSEST((q * q), 100);
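/* Worked example (illustrative): for max_cll = 70, q = 70 >> 5 = 2 and
 * r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
 * which matches 50*2**(70/32) ~= 227.7 from the formula above.
 */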
2119 caps->aux_max_input_signal = max;
2120 caps->aux_min_input_signal = min;
2123 void amdgpu_dm_update_connector_after_detect(
2124 struct amdgpu_dm_connector *aconnector)
2126 struct drm_connector *connector = &aconnector->base;
2127 struct drm_device *dev = connector->dev;
2128 struct dc_sink *sink;
2130 /* MST handled by drm_mst framework */
    if (aconnector->mst_mgr.mst_state == true)
        return;

    sink = aconnector->dc_link->local_sink;
    if (sink)
        dc_sink_retain(sink);
    /*
     * Edid mgmt connector gets first update only in mode_valid hook and then
     * the connector sink is set to either fake or physical sink depending on
     * link status. Skip if already done during boot.
     */
2143 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2144 && aconnector->dc_em_sink) {
2147 * For S3 resume with headless use eml_sink to fake stream
2148 * because on resume connector->sink is set to NULL
        mutex_lock(&dev->mode_config.mutex);

        if (sink) {
            if (aconnector->dc_sink) {
2154 amdgpu_dm_update_freesync_caps(connector, NULL);
                /*
                 * The retain/release pair below bumps the sink's refcount
                 * because the link no longer points to it after disconnect;
                 * otherwise the next CRTC-to-connector reshuffle by UMD
                 * would trigger an unwanted dc_sink release.
                 */
                dc_sink_release(aconnector->dc_sink);
            }
2163 aconnector->dc_sink = sink;
2164 dc_sink_retain(aconnector->dc_sink);
            amdgpu_dm_update_freesync_caps(connector,
                    aconnector->edid);
        } else {
            amdgpu_dm_update_freesync_caps(connector, NULL);
2169 if (!aconnector->dc_sink) {
2170 aconnector->dc_sink = aconnector->dc_em_sink;
                dc_sink_retain(aconnector->dc_sink);
            }
        }

        mutex_unlock(&dev->mode_config.mutex);

        if (sink)
            dc_sink_release(sink);
        return;
    }
2183 * TODO: temporary guard to look for proper fix
2184 * if this sink is MST sink, we should not do anything
2186 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
        dc_sink_release(sink);
        return;
    }
2191 if (aconnector->dc_sink == sink) {
        /*
         * We got a DP short pulse (Link Loss, DP CTS, etc...).
         * Do nothing!!
         */
2196 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2197 aconnector->connector_id);
        if (sink)
            dc_sink_release(sink);
        return;
    }
2203 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2204 aconnector->connector_id, aconnector->dc_sink, sink);
2206 mutex_lock(&dev->mode_config.mutex);
2209 * 1. Update status of the drm connector
2210 * 2. Send an event and let userspace tell us what to do
    if (sink) {
        /*
         * TODO: check if we still need the S3 mode update workaround.
         * If yes, put it here.
         */
        if (aconnector->dc_sink)
2218 amdgpu_dm_update_freesync_caps(connector, NULL);
2220 aconnector->dc_sink = sink;
2221 dc_sink_retain(aconnector->dc_sink);
2222 if (sink->dc_edid.length == 0) {
2223 aconnector->edid = NULL;
2224 if (aconnector->dc_link->aux_mode) {
2225 drm_dp_cec_unset_edid(
                    &aconnector->dm_dp_aux.aux);
            }
        } else {
            aconnector->edid =
                (struct edid *)sink->dc_edid.raw_edid;
            drm_connector_update_edid_property(connector,
                               aconnector->edid);
2235 if (aconnector->dc_link->aux_mode)
                drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
                            aconnector->edid);
        }
2240 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
        update_connector_ext_caps(aconnector);
    } else {
2243 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2244 amdgpu_dm_update_freesync_caps(connector, NULL);
2245 drm_connector_update_edid_property(connector, NULL);
2246 aconnector->num_modes = 0;
2247 dc_sink_release(aconnector->dc_sink);
2248 aconnector->dc_sink = NULL;
2249 aconnector->edid = NULL;
2250 #ifdef CONFIG_DRM_AMD_DC_HDCP
2251 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2252 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2253 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
    }

    mutex_unlock(&dev->mode_config.mutex);
2259 update_subconnector_property(aconnector);
    if (sink)
        dc_sink_release(sink);
}
2265 static void handle_hpd_irq(void *param)
2267 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2268 struct drm_connector *connector = &aconnector->base;
2269 struct drm_device *dev = connector->dev;
2270 enum dc_connection_type new_connection_type = dc_connection_none;
2271 #ifdef CONFIG_DRM_AMD_DC_HDCP
    struct amdgpu_device *adev = dev->dev_private;
#endif
    /*
     * In case of failure or MST, there is no need to update the connector
     * status or notify the OS, since (in the MST case) MST does this in
     * its own context.
     */
2279 mutex_lock(&aconnector->hpd_lock);
2281 #ifdef CONFIG_DRM_AMD_DC_HDCP
2282 if (adev->dm.hdcp_workqueue)
        hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
2285 if (aconnector->fake_enable)
2286 aconnector->fake_enable = false;
2288 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2289 DRM_ERROR("KMS: Failed to detect connector\n");
2291 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2292 emulated_link_detect(aconnector->dc_link);
2295 drm_modeset_lock_all(dev);
2296 dm_restore_drm_connector_state(dev, connector);
2297 drm_modeset_unlock_all(dev);
2299 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2300 drm_kms_helper_hotplug_event(dev);
2302 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2303 amdgpu_dm_update_connector_after_detect(aconnector);
2306 drm_modeset_lock_all(dev);
2307 dm_restore_drm_connector_state(dev, connector);
2308 drm_modeset_unlock_all(dev);
2310 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2311 drm_kms_helper_hotplug_event(dev);
2313 mutex_unlock(&aconnector->hpd_lock);
2317 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
    uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
    uint8_t dret;
    bool new_irq_handled = false;
    int dpcd_addr;
    int dpcd_bytes_to_read;
2325 const int max_process_count = 30;
2326 int process_count = 0;
2328 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2330 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2331 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2332 /* DPCD 0x200 - 0x201 for downstream IRQ */
2333 dpcd_addr = DP_SINK_COUNT;
    } else {
        dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2336 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
        dpcd_addr = DP_SINK_COUNT_ESI;
    }
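    /*
     * Example (illustrative): a DPCD rev 1.1 sink reads 2 bytes starting at
     * DP_SINK_COUNT (0x200), while a rev 1.2+ sink reads 4 bytes of ESI
     * starting at DP_SINK_COUNT_ESI (0x2002).
     */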
2340 dret = drm_dp_dpcd_read(
        &aconnector->dm_dp_aux.aux,
        dpcd_addr,
        esi,
        dpcd_bytes_to_read);
2346 while (dret == dpcd_bytes_to_read &&
2347 process_count < max_process_count) {
2353 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2354 /* handle HPD short pulse irq */
2355 if (aconnector->mst_mgr.mst_state)
            drm_dp_mst_hpd_irq(
                &aconnector->mst_mgr,
                esi,
                &new_irq_handled);
2361 if (new_irq_handled) {
2362 /* ACK at DPCD to notify down stream */
2363 const int ack_dpcd_bytes_to_write =
2364 dpcd_bytes_to_read - 1;
2366 for (retry = 0; retry < 3; retry++) {
                uint8_t wret;

                wret = drm_dp_dpcd_write(
                    &aconnector->dm_dp_aux.aux,
                    dpcd_addr + 1,
                    &esi[1],
                    ack_dpcd_bytes_to_write);
                if (wret == ack_dpcd_bytes_to_write)
                    break;
            }
2378 /* check if there is new irq to be handled */
2379 dret = drm_dp_dpcd_read(
                &aconnector->dm_dp_aux.aux,
                dpcd_addr,
                esi,
                dpcd_bytes_to_read);
            new_irq_handled = false;
        } else {
            break;
        }
    }
2391 if (process_count == max_process_count)
2392 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2395 static void handle_hpd_rx_irq(void *param)
2397 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2398 struct drm_connector *connector = &aconnector->base;
2399 struct drm_device *dev = connector->dev;
2400 struct dc_link *dc_link = aconnector->dc_link;
2401 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2402 enum dc_connection_type new_connection_type = dc_connection_none;
2403 #ifdef CONFIG_DRM_AMD_DC_HDCP
2404 union hpd_irq_data hpd_irq_data;
2405 struct amdgpu_device *adev = dev->dev_private;
    memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif
    /*
     * TODO: Temporarily add a mutex to protect the HPD interrupt from a
     * GPIO conflict; once the i2c helper is implemented, this mutex
     * should be retired.
     */
2415 if (dc_link->type != dc_connection_mst_branch)
2416 mutex_lock(&aconnector->hpd_lock);
2419 #ifdef CONFIG_DRM_AMD_DC_HDCP
2420 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
    if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
2424 !is_mst_root_connector) {
2425 /* Downstream Port status changed. */
2426 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2427 DRM_ERROR("KMS: Failed to detect connector\n");
2429 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2430 emulated_link_detect(dc_link);
2432 if (aconnector->fake_enable)
2433 aconnector->fake_enable = false;
2435 amdgpu_dm_update_connector_after_detect(aconnector);
2438 drm_modeset_lock_all(dev);
2439 dm_restore_drm_connector_state(dev, connector);
2440 drm_modeset_unlock_all(dev);
2442 drm_kms_helper_hotplug_event(dev);
2443 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2445 if (aconnector->fake_enable)
2446 aconnector->fake_enable = false;
2448 amdgpu_dm_update_connector_after_detect(aconnector);
2451 drm_modeset_lock_all(dev);
2452 dm_restore_drm_connector_state(dev, connector);
2453 drm_modeset_unlock_all(dev);
2455 drm_kms_helper_hotplug_event(dev);
2458 #ifdef CONFIG_DRM_AMD_DC_HDCP
2459 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2460 if (adev->dm.hdcp_workqueue)
2461 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2464 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2465 (dc_link->type == dc_connection_mst_branch))
2466 dm_handle_hpd_rx_irq(aconnector);
2468 if (dc_link->type != dc_connection_mst_branch) {
2469 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2470 mutex_unlock(&aconnector->hpd_lock);
2474 static void register_hpd_handlers(struct amdgpu_device *adev)
2476 struct drm_device *dev = adev->ddev;
2477 struct drm_connector *connector;
2478 struct amdgpu_dm_connector *aconnector;
2479 const struct dc_link *dc_link;
2480 struct dc_interrupt_params int_params = {0};
2482 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2483 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2485 list_for_each_entry(connector,
2486 &dev->mode_config.connector_list, head) {
2488 aconnector = to_amdgpu_dm_connector(connector);
2489 dc_link = aconnector->dc_link;
2491 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2492 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2493 int_params.irq_source = dc_link->irq_source_hpd;
2495 amdgpu_dm_irq_register_interrupt(adev, &int_params,
                    handle_hpd_irq,
                    (void *) aconnector);
        }
2500 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2502 /* Also register for DP short pulse (hpd_rx). */
2503 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2504 int_params.irq_source = dc_link->irq_source_hpd_rx;
2506 amdgpu_dm_irq_register_interrupt(adev, &int_params,
                    handle_hpd_rx_irq,
                    (void *) aconnector);
        }
    }
}
2513 /* Register IRQ sources and initialize IRQ callbacks */
2514 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2516 struct dc *dc = adev->dm.dc;
2517 struct common_irq_params *c_irq_params;
2518 struct dc_interrupt_params int_params = {0};
2521 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2523 if (adev->asic_type >= CHIP_VEGA10)
2524 client_id = SOC15_IH_CLIENTID_DCE;
2526 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2527 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2530 * Actions of amdgpu_irq_add_id():
2531 * 1. Register a set() function with base driver.
2532 * Base driver will call set() function to enable/disable an
2533 * interrupt in DC hardware.
2534 * 2. Register amdgpu_dm_irq_handler().
2535 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2536 * coming from DC hardware.
2537 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2538 * for acknowledging and handling. */
2540 /* Use VBLANK interrupt */
2541 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2542 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2544 DRM_ERROR("Failed to add crtc irq id!\n");
2548 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2549 int_params.irq_source =
2550 dc_interrupt_to_irq_source(dc, i, 0);
2552 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2554 c_irq_params->adev = adev;
2555 c_irq_params->irq_src = int_params.irq_source;
2557 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2558 dm_crtc_high_irq, c_irq_params);
2561 /* Use VUPDATE interrupt */
2562 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2563 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2565 DRM_ERROR("Failed to add vupdate irq id!\n");
2569 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2570 int_params.irq_source =
2571 dc_interrupt_to_irq_source(dc, i, 0);
2573 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2575 c_irq_params->adev = adev;
2576 c_irq_params->irq_src = int_params.irq_source;
2578 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2579 dm_vupdate_high_irq, c_irq_params);
2582 /* Use GRPH_PFLIP interrupt */
2583 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2584 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2585 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2587 DRM_ERROR("Failed to add page flip irq id!\n");
2591 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2592 int_params.irq_source =
2593 dc_interrupt_to_irq_source(dc, i, 0);
2595 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2597 c_irq_params->adev = adev;
2598 c_irq_params->irq_src = int_params.irq_source;
2600 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2601 dm_pflip_high_irq, c_irq_params);
2606 r = amdgpu_irq_add_id(adev, client_id,
2607 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2609 DRM_ERROR("Failed to add hpd irq id!\n");
    register_hpd_handlers(adev);

    return 0;
}
2618 #if defined(CONFIG_DRM_AMD_DC_DCN)
2619 /* Register IRQ sources and initialize IRQ callbacks */
2620 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2622 struct dc *dc = adev->dm.dc;
2623 struct common_irq_params *c_irq_params;
2624 struct dc_interrupt_params int_params = {0};
2628 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2629 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2632 * Actions of amdgpu_irq_add_id():
2633 * 1. Register a set() function with base driver.
2634 * Base driver will call set() function to enable/disable an
2635 * interrupt in DC hardware.
2636 * 2. Register amdgpu_dm_irq_handler().
2637 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2638 * coming from DC hardware.
2639 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2640 * for acknowledging and handling.
2643 /* Use VSTARTUP interrupt */
2644 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2645 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2647 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2650 DRM_ERROR("Failed to add crtc irq id!\n");
2654 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2655 int_params.irq_source =
2656 dc_interrupt_to_irq_source(dc, i, 0);
2658 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2660 c_irq_params->adev = adev;
2661 c_irq_params->irq_src = int_params.irq_source;
2663 amdgpu_dm_irq_register_interrupt(
2664 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2667 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2668 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2669 * to trigger at end of each vblank, regardless of state of the lock,
2670 * matching DCE behaviour.
2672 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2673 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2675 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2678 DRM_ERROR("Failed to add vupdate irq id!\n");
2682 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2683 int_params.irq_source =
2684 dc_interrupt_to_irq_source(dc, i, 0);
2686 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2688 c_irq_params->adev = adev;
2689 c_irq_params->irq_src = int_params.irq_source;
2691 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2692 dm_vupdate_high_irq, c_irq_params);
2695 /* Use GRPH_PFLIP interrupt */
2696 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2697 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2699 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2701 DRM_ERROR("Failed to add page flip irq id!\n");
2705 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2706 int_params.irq_source =
2707 dc_interrupt_to_irq_source(dc, i, 0);
2709 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2711 c_irq_params->adev = adev;
2712 c_irq_params->irq_src = int_params.irq_source;
2714 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2715 dm_pflip_high_irq, c_irq_params);
    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
            &adev->hpd_irq);
    if (r) {
        DRM_ERROR("Failed to add hpd irq id!\n");
        return r;
    }
    register_hpd_handlers(adev);

    return 0;
}
#endif
2734 * Acquires the lock for the atomic state object and returns
2735 * the new atomic state.
2737 * This should only be called during atomic check.
2739 static int dm_atomic_get_state(struct drm_atomic_state *state,
2740 struct dm_atomic_state **dm_state)
2742 struct drm_device *dev = state->dev;
2743 struct amdgpu_device *adev = dev->dev_private;
2744 struct amdgpu_display_manager *dm = &adev->dm;
2745 struct drm_private_state *priv_state;
    if (*dm_state)
        return 0;

    priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2751 if (IS_ERR(priv_state))
2752 return PTR_ERR(priv_state);
    *dm_state = to_dm_atomic_state(priv_state);

    return 0;
}
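/*
 * Minimal usage sketch (hypothetical caller, assuming it runs during atomic
 * check where the private object lock can be taken):
 *
 *    struct dm_atomic_state *dm_state = NULL;
 *    int ret = dm_atomic_get_state(state, &dm_state);
 *
 *    if (ret)
 *        return ret;
 *    // dm_state->context is now safe to modify for this commit.
 */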
2759 static struct dm_atomic_state *
2760 dm_atomic_get_new_state(struct drm_atomic_state *state)
2762 struct drm_device *dev = state->dev;
2763 struct amdgpu_device *adev = dev->dev_private;
2764 struct amdgpu_display_manager *dm = &adev->dm;
2765 struct drm_private_obj *obj;
2766 struct drm_private_state *new_obj_state;
2769 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2770 if (obj->funcs == dm->atomic_obj.funcs)
            return to_dm_atomic_state(new_obj_state);

    return NULL;
}
2777 static struct dm_atomic_state *
2778 dm_atomic_get_old_state(struct drm_atomic_state *state)
2780 struct drm_device *dev = state->dev;
2781 struct amdgpu_device *adev = dev->dev_private;
2782 struct amdgpu_display_manager *dm = &adev->dm;
2783 struct drm_private_obj *obj;
2784 struct drm_private_state *old_obj_state;
2787 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2788 if (obj->funcs == dm->atomic_obj.funcs)
            return to_dm_atomic_state(old_obj_state);

    return NULL;
}
2795 static struct drm_private_state *
2796 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2798 struct dm_atomic_state *old_state, *new_state;
    new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
    if (!new_state)
        return NULL;
2804 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2806 old_state = to_dm_atomic_state(obj->state);
2808 if (old_state && old_state->context)
2809 new_state->context = dc_copy_state(old_state->context);
    if (!new_state->context) {
        kfree(new_state);
        return NULL;
    }

    return &new_state->base;
}
2819 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2820 struct drm_private_state *state)
2822 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2824 if (dm_state && dm_state->context)
        dc_release_state(dm_state->context);

    kfree(dm_state);
}
2830 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2831 .atomic_duplicate_state = dm_atomic_duplicate_state,
2832 .atomic_destroy_state = dm_atomic_destroy_state,
2835 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2837 struct dm_atomic_state *state;
2840 adev->mode_info.mode_config_initialized = true;
2842 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2843 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2845 adev->ddev->mode_config.max_width = 16384;
2846 adev->ddev->mode_config.max_height = 16384;
2848 adev->ddev->mode_config.preferred_depth = 24;
2849 adev->ddev->mode_config.prefer_shadow = 1;
2850 /* indicates support for immediate flip */
2851 adev->ddev->mode_config.async_page_flip = true;
2853 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2855 state = kzalloc(sizeof(*state), GFP_KERNEL);
2859 state->context = dc_create_state(adev->dm.dc);
2860 if (!state->context) {
2865 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2867 drm_atomic_private_obj_init(adev->ddev,
2868 &adev->dm.atomic_obj,
2870 &dm_atomic_state_funcs);
2872 r = amdgpu_display_modeset_create_props(adev);
2876 r = amdgpu_dm_audio_init(adev);
2883 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2884 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2885 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2887 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2888 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2890 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2892 #if defined(CONFIG_ACPI)
2893 struct amdgpu_dm_backlight_caps caps;
    if (dm->backlight_caps.caps_valid)
        return;

2898 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2899 if (caps.caps_valid) {
2900 dm->backlight_caps.caps_valid = true;
        if (caps.aux_support)
            return;

2903 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2904 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2906 dm->backlight_caps.min_input_signal =
2907 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2908 dm->backlight_caps.max_input_signal =
                AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
    }
#else
    if (dm->backlight_caps.aux_support)
        return;

2915 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
    dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
2920 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
{
    bool rc;

    if (!link)
        return 1;

    rc = dc_link_set_backlight_level_nits(link, true, brightness,
                          AUX_BL_DEFAULT_TRANSITION_TIME_MS);

    return rc ? 0 : 1;
}
2933 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2934 const uint32_t user_brightness)
{
    u32 min, max, conversion_pace;
    u32 brightness = user_brightness;

    if (!caps)
        goto out;

    if (!caps->aux_support) {
2943 max = caps->max_input_signal;
2944 min = caps->min_input_signal;
2946 * The brightness input is in the range 0-255
2947 * It needs to be rescaled to be between the
2948 * requested min and max input signal
2949 * It also needs to be scaled up by 0x101 to
2950 * match the DC interface which has a range of
        conversion_pace = 0x101;
        brightness = user_brightness
            * conversion_pace
            * (max - min)
            / AMDGPU_MAX_BL_LEVEL
            + min * conversion_pace;
    } else {
        /*
         * We are doing a linear interpolation here, which is OK but
         * does not provide the optimal result. We probably want
         * something close to the Perceptual Quantizer (PQ) curve.
         */
2966 max = caps->aux_max_input_signal;
2967 min = caps->aux_min_input_signal;
2969 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2970 + user_brightness * max;
        // Multiply the value by 1000 since we use millinits
        brightness *= 1000;
        brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
    }

out:
    return brightness;
}
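/*
 * Worked example (illustrative): with min_input_signal = 12,
 * max_input_signal = 255 and a non-AUX panel, user_brightness = 128 maps to
 * 128 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 31348 + 3084 = 34432
 * out of the 0-0xffff range the DC interface expects.
 */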
2980 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2982 struct amdgpu_display_manager *dm = bl_get_data(bd);
2983 struct amdgpu_dm_backlight_caps caps;
2984 struct dc_link *link = NULL;
2988 amdgpu_dm_update_backlight_caps(dm);
2989 caps = dm->backlight_caps;
2991 link = (struct dc_link *)dm->backlight_link;
2993 brightness = convert_brightness(&caps, bd->props.brightness);
2994 // Change brightness based on AUX property
2995 if (caps.aux_support)
2996 return set_backlight_via_aux(link, brightness);
    rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

    return rc ? 0 : 1;
}
3003 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3005 struct amdgpu_display_manager *dm = bl_get_data(bd);
3006 int ret = dc_link_get_backlight_level(dm->backlight_link);
3008 if (ret == DC_ERROR_UNEXPECTED)
        return bd->props.brightness;

    return ret;
}
3013 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3014 .options = BL_CORE_SUSPENDRESUME,
3015 .get_brightness = amdgpu_dm_backlight_get_brightness,
3016 .update_status = amdgpu_dm_backlight_update_status,
3020 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3023 struct backlight_properties props = { 0 };
3025 amdgpu_dm_update_backlight_caps(dm);
3027 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3028 props.brightness = AMDGPU_MAX_BL_LEVEL;
3029 props.type = BACKLIGHT_RAW;
3031 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3032 dm->adev->ddev->primary->index);
3034 dm->backlight_dev = backlight_device_register(bl_name,
3035 dm->adev->ddev->dev,
3037 &amdgpu_dm_backlight_ops,
3040 if (IS_ERR(dm->backlight_dev))
3041 DRM_ERROR("DM: Backlight registration failed!\n");
3043 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3048 static int initialize_plane(struct amdgpu_display_manager *dm,
3049 struct amdgpu_mode_info *mode_info, int plane_id,
3050 enum drm_plane_type plane_type,
3051 const struct dc_plane_cap *plane_cap)
3053 struct drm_plane *plane;
3054 unsigned long possible_crtcs;
3057 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3059 DRM_ERROR("KMS: Failed to allocate plane\n");
3062 plane->type = plane_type;
3065 * HACK: IGT tests expect that the primary plane for a CRTC
3066 * can only have one possible CRTC. Only expose support for
3067 * any CRTC if they're not going to be used as a primary plane
3068 * for a CRTC - like overlay or underlay planes.
3070 possible_crtcs = 1 << plane_id;
3071 if (plane_id >= dm->dc->caps.max_streams)
3072 possible_crtcs = 0xff;
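    /*
     * Example (illustrative): plane_id 0 can only be placed on CRTC 0
     * (possible_crtcs = 0x1), while an overlay plane with plane_id >=
     * max_streams advertises every CRTC (possible_crtcs = 0xff).
     */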
3074 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3077 DRM_ERROR("KMS: Failed to initialize plane\n");
3083 mode_info->planes[plane_id] = plane;
3089 static void register_backlight_device(struct amdgpu_display_manager *dm,
3090 struct dc_link *link)
3092 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3093 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3095 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3096 link->type != dc_connection_none) {
        /*
         * Even if registration fails, we should continue with
         * DM initialization because not having a backlight control
         * is better than a black screen.
         */
3102 amdgpu_dm_register_backlight_device(dm);
3104 if (dm->backlight_dev)
3105 dm->backlight_link = link;
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
3119 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3121 struct amdgpu_display_manager *dm = &adev->dm;
3123 struct amdgpu_dm_connector *aconnector = NULL;
3124 struct amdgpu_encoder *aencoder = NULL;
3125 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3127 int32_t primary_planes;
3128 enum dc_connection_type new_connection_type = dc_connection_none;
3129 const struct dc_plane_cap *plane;
3131 link_cnt = dm->dc->caps.max_links;
3132 if (amdgpu_dm_mode_config_init(dm->adev)) {
3133 DRM_ERROR("DM: Failed to initialize mode config\n");
3137 /* There is one primary plane per CRTC */
3138 primary_planes = dm->dc->caps.max_streams;
3139 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3142 * Initialize primary planes, implicit planes for legacy IOCTLS.
3143 * Order is reversed to match iteration order in atomic check.
3145 for (i = (primary_planes - 1); i >= 0; i--) {
3146 plane = &dm->dc->caps.planes[i];
3148 if (initialize_plane(dm, mode_info, i,
3149 DRM_PLANE_TYPE_PRIMARY, plane)) {
3150 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3156 * Initialize overlay planes, index starting after primary planes.
3157 * These planes have a higher DRM index than the primary planes since
3158 * they should be considered as having a higher z-order.
3159 * Order is reversed to match iteration order in atomic check.
3161 * Only support DCN for now, and only expose one so we don't encourage
3162 * userspace to use up all the pipes.
3164 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3165 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3167 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3170 if (!plane->blends_with_above || !plane->blends_with_below)
3173 if (!plane->pixel_format_support.argb8888)
3176 if (initialize_plane(dm, NULL, primary_planes + i,
3177 DRM_PLANE_TYPE_OVERLAY, plane)) {
3178 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3182 /* Only create one overlay plane. */
3186 for (i = 0; i < dm->dc->caps.max_streams; i++)
3187 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3188 DRM_ERROR("KMS: Failed to initialize crtc\n");
3192 dm->display_indexes_num = dm->dc->caps.max_streams;
3194 /* loops over all connectors on the board */
3195 for (i = 0; i < link_cnt; i++) {
3196 struct dc_link *link = NULL;
3198 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3200 "KMS: Cannot support more than %d display indexes\n",
3201 AMDGPU_DM_MAX_DISPLAY_INDEX);
        aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
        if (!aconnector)
            goto fail;

        aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
        if (!aencoder)
            goto fail;
3213 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3214 DRM_ERROR("KMS: Failed to initialize encoder\n");
3218 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3219 DRM_ERROR("KMS: Failed to initialize connector\n");
3223 link = dc_get_link_at_index(dm->dc, i);
3225 if (!dc_link_detect_sink(link, &new_connection_type))
3226 DRM_ERROR("KMS: Failed to detect connector\n");
3228 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3229 emulated_link_detect(link);
3230 amdgpu_dm_update_connector_after_detect(aconnector);
3232 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3233 amdgpu_dm_update_connector_after_detect(aconnector);
3234 register_backlight_device(dm, link);
3235 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3236 amdgpu_dm_set_psr_caps(link);
3242 /* Software is initialized. Now we can register interrupt handlers. */
3243 switch (adev->asic_type) {
3253 case CHIP_POLARIS11:
3254 case CHIP_POLARIS10:
3255 case CHIP_POLARIS12:
3260 if (dce110_register_irq_handlers(dm->adev)) {
3261 DRM_ERROR("DM: Failed to initialize IRQ\n");
3265 #if defined(CONFIG_DRM_AMD_DC_DCN)
3271 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3272 case CHIP_SIENNA_CICHLID:
3273 case CHIP_NAVY_FLOUNDER:
3275 if (dcn10_register_irq_handlers(dm->adev)) {
3276 DRM_ERROR("DM: Failed to initialize IRQ\n");
3282 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3286 /* No userspace support. */
    dm->dc->debug.disable_tri_buf = true;

    return 0;
fail:
    kfree(aencoder);
    kfree(aconnector);

    return -EINVAL;
}
3297 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3299 drm_mode_config_cleanup(dm->ddev);
3300 drm_atomic_private_obj_fini(&dm->atomic_obj);
3304 /******************************************************************************
3305 * amdgpu_display_funcs functions
3306 *****************************************************************************/
3309 * dm_bandwidth_update - program display watermarks
3311 * @adev: amdgpu_device pointer
3313 * Calculate and program the display watermarks and line buffer allocation.
3315 static void dm_bandwidth_update(struct amdgpu_device *adev)
3317 /* TODO: implement later */
3320 static const struct amdgpu_display_funcs dm_display_funcs = {
3321 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3322 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3323 .backlight_set_level = NULL, /* never called for DC */
3324 .backlight_get_level = NULL, /* never called for DC */
3325 .hpd_sense = NULL,/* called unconditionally */
3326 .hpd_set_polarity = NULL, /* called unconditionally */
3327 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3328 .page_flip_get_scanoutpos =
3329 dm_crtc_get_scanoutpos,/* called unconditionally */
3330 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3331 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3334 #if defined(CONFIG_DEBUG_KERNEL_DC)
3336 static ssize_t s3_debug_store(struct device *device,
3337 struct device_attribute *attr,
3343 struct drm_device *drm_dev = dev_get_drvdata(device);
3344 struct amdgpu_device *adev = drm_dev->dev_private;
3346 ret = kstrtoint(buf, 0, &s3_state);
3351 drm_kms_helper_hotplug_event(adev->ddev);
3356 return ret == 0 ? count : 0;
3359 DEVICE_ATTR_WO(s3_debug);
3363 static int dm_early_init(void *handle)
3365 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3367 switch (adev->asic_type) {
3370 adev->mode_info.num_crtc = 6;
3371 adev->mode_info.num_hpd = 6;
3372 adev->mode_info.num_dig = 6;
3375 adev->mode_info.num_crtc = 4;
3376 adev->mode_info.num_hpd = 6;
3377 adev->mode_info.num_dig = 7;
3381 adev->mode_info.num_crtc = 2;
3382 adev->mode_info.num_hpd = 6;
3383 adev->mode_info.num_dig = 6;
3387 adev->mode_info.num_crtc = 6;
3388 adev->mode_info.num_hpd = 6;
3389 adev->mode_info.num_dig = 7;
3392 adev->mode_info.num_crtc = 3;
3393 adev->mode_info.num_hpd = 6;
3394 adev->mode_info.num_dig = 9;
3397 adev->mode_info.num_crtc = 2;
3398 adev->mode_info.num_hpd = 6;
3399 adev->mode_info.num_dig = 9;
3401 case CHIP_POLARIS11:
3402 case CHIP_POLARIS12:
3403 adev->mode_info.num_crtc = 5;
3404 adev->mode_info.num_hpd = 5;
3405 adev->mode_info.num_dig = 5;
3407 case CHIP_POLARIS10:
3409 adev->mode_info.num_crtc = 6;
3410 adev->mode_info.num_hpd = 6;
3411 adev->mode_info.num_dig = 6;
3416 adev->mode_info.num_crtc = 6;
3417 adev->mode_info.num_hpd = 6;
3418 adev->mode_info.num_dig = 6;
3420 #if defined(CONFIG_DRM_AMD_DC_DCN)
3422 adev->mode_info.num_crtc = 4;
3423 adev->mode_info.num_hpd = 4;
3424 adev->mode_info.num_dig = 4;
3429 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3430 case CHIP_SIENNA_CICHLID:
3431 case CHIP_NAVY_FLOUNDER:
3433 adev->mode_info.num_crtc = 6;
3434 adev->mode_info.num_hpd = 6;
3435 adev->mode_info.num_dig = 6;
3438 adev->mode_info.num_crtc = 5;
3439 adev->mode_info.num_hpd = 5;
3440 adev->mode_info.num_dig = 5;
3443 adev->mode_info.num_crtc = 4;
3444 adev->mode_info.num_hpd = 4;
3445 adev->mode_info.num_dig = 4;
3448 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3452 amdgpu_dm_set_irq_funcs(adev);
3454 if (adev->mode_info.funcs == NULL)
3455 adev->mode_info.funcs = &dm_display_funcs;
3458 * Note: Do NOT change adev->audio_endpt_rreg and
3459 * adev->audio_endpt_wreg because they are initialised in
3460 * amdgpu_device_init()
3462 #if defined(CONFIG_DEBUG_KERNEL_DC)
3465 &dev_attr_s3_debug);
3471 static bool modeset_required(struct drm_crtc_state *crtc_state,
3472 struct dc_stream_state *new_stream,
3473 struct dc_stream_state *old_stream)
{
    if (!drm_atomic_crtc_needs_modeset(crtc_state))
        return false;

    if (!crtc_state->enable)
        return false;

    return crtc_state->active;
}
3484 static bool modereset_required(struct drm_crtc_state *crtc_state)
{
    if (!drm_atomic_crtc_needs_modeset(crtc_state))
        return false;

    return !crtc_state->enable || !crtc_state->active;
}
3492 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
    drm_encoder_cleanup(encoder);
    kfree(encoder);
}
3498 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3499 .destroy = amdgpu_dm_encoder_destroy,
3503 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3504 struct dc_scaling_info *scaling_info)
3506 int scale_w, scale_h;
3508 memset(scaling_info, 0, sizeof(*scaling_info));
3510 /* Source is fixed 16.16 but we ignore mantissa for now... */
3511 scaling_info->src_rect.x = state->src_x >> 16;
3512 scaling_info->src_rect.y = state->src_y >> 16;
3514 scaling_info->src_rect.width = state->src_w >> 16;
    if (scaling_info->src_rect.width == 0)
        return -EINVAL;

3518 scaling_info->src_rect.height = state->src_h >> 16;
    if (scaling_info->src_rect.height == 0)
        return -EINVAL;

3522 scaling_info->dst_rect.x = state->crtc_x;
3523 scaling_info->dst_rect.y = state->crtc_y;
    if (state->crtc_w == 0)
        return -EINVAL;

3528 scaling_info->dst_rect.width = state->crtc_w;
    if (state->crtc_h == 0)
        return -EINVAL;

3533 scaling_info->dst_rect.height = state->crtc_h;
3535 /* DRM doesn't specify clipping on destination output. */
3536 scaling_info->clip_rect = scaling_info->dst_rect;
3538 /* TODO: Validate scaling per-format with DC plane caps */
3539 scale_w = scaling_info->dst_rect.width * 1000 /
3540 scaling_info->src_rect.width;
    if (scale_w < 250 || scale_w > 16000)
        return -EINVAL;

3545 scale_h = scaling_info->dst_rect.height * 1000 /
3546 scaling_info->src_rect.height;
    if (scale_h < 250 || scale_h > 16000)
        return -EINVAL;

    /*
     * The "scaling_quality" can be ignored for now: with quality = 0,
     * DC assumes reasonable defaults based on the format.
     */

    return 0;
}
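/*
 * The scale factors above are in units of 1/1000, so the supported range is
 * 0.25x (250) to 16x (16000). For example (illustrative), a 1920-wide source
 * scaled to a 960-wide destination gives scale_w = 960 * 1000 / 1920 = 500,
 * i.e. a 0.5x downscale, which is accepted.
 */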
3559 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3560 uint64_t *tiling_flags, bool *tmz_surface)
3562 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3563 int r = amdgpu_bo_reserve(rbo, false);
    if (r) {
        /* Don't show error message when returning -ERESTARTSYS */
        if (r != -ERESTARTSYS)
            DRM_ERROR("Unable to reserve buffer: %d\n", r);
        return r;
    }
3573 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3576 *tmz_surface = amdgpu_bo_encrypted(rbo);
    amdgpu_bo_unreserve(rbo);

    return r;
}
3583 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3585 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
    return offset ? (address + offset * 256) : 0;
}
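/*
 * Example (illustrative): DCC_OFFSET_256B is stored in 256-byte units, so an
 * encoded offset of 4 places the DCC metadata at address + 4 * 256 =
 * address + 1024; an encoded offset of 0 means DCC is not in use.
 */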
3591 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3592 const struct amdgpu_framebuffer *afb,
3593 const enum surface_pixel_format format,
3594 const enum dc_rotation_angle rotation,
3595 const struct plane_size *plane_size,
3596 const union dc_tiling_info *tiling_info,
3597 const uint64_t info,
3598 struct dc_plane_dcc_param *dcc,
3599 struct dc_plane_address *address,
3600 bool force_disable_dcc)
3602 struct dc *dc = adev->dm.dc;
3603 struct dc_dcc_surface_param input;
3604 struct dc_surface_dcc_cap output;
3605 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3606 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3607 uint64_t dcc_address;
3609 memset(&input, 0, sizeof(input));
3610 memset(&output, 0, sizeof(output));
    if (force_disable_dcc)
        return 0;

    if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
        return 0;

    if (!dc->cap_funcs.get_dcc_compression_cap)
        return -EINVAL;

3624 input.format = format;
3625 input.surface_size.width = plane_size->surface_size.width;
3626 input.surface_size.height = plane_size->surface_size.height;
3627 input.swizzle_mode = tiling_info->gfx9.swizzle;
3629 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3630 input.scan = SCAN_DIRECTION_HORIZONTAL;
3631 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3632 input.scan = SCAN_DIRECTION_VERTICAL;
    if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
        return -EINVAL;

    if (!output.capable)
        return -EINVAL;

    if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
        return -EINVAL;

    dcc->enable = 1;
    dcc->meta_pitch =
        AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3646 dcc->independent_64b_blks = i64b;
3648 dcc_address = get_dcc_address(afb->address, info);
3649 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
    address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

    return 0;
}
3656 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3657 const struct amdgpu_framebuffer *afb,
3658 const enum surface_pixel_format format,
3659 const enum dc_rotation_angle rotation,
3660 const uint64_t tiling_flags,
3661 union dc_tiling_info *tiling_info,
3662 struct plane_size *plane_size,
3663 struct dc_plane_dcc_param *dcc,
3664 struct dc_plane_address *address,
                 bool tmz_surface,
                 bool force_disable_dcc)
3668 const struct drm_framebuffer *fb = &afb->base;
3671 memset(tiling_info, 0, sizeof(*tiling_info));
3672 memset(plane_size, 0, sizeof(*plane_size));
3673 memset(dcc, 0, sizeof(*dcc));
3674 memset(address, 0, sizeof(*address));
3676 address->tmz_surface = tmz_surface;
3678 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3679 plane_size->surface_size.x = 0;
3680 plane_size->surface_size.y = 0;
3681 plane_size->surface_size.width = fb->width;
3682 plane_size->surface_size.height = fb->height;
3683 plane_size->surface_pitch =
3684 fb->pitches[0] / fb->format->cpp[0];
3686 address->type = PLN_ADDR_TYPE_GRAPHICS;
3687 address->grph.addr.low_part = lower_32_bits(afb->address);
3688 address->grph.addr.high_part = upper_32_bits(afb->address);
3689 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3690 uint64_t chroma_addr = afb->address + fb->offsets[1];
3692 plane_size->surface_size.x = 0;
3693 plane_size->surface_size.y = 0;
3694 plane_size->surface_size.width = fb->width;
3695 plane_size->surface_size.height = fb->height;
3696 plane_size->surface_pitch =
3697 fb->pitches[0] / fb->format->cpp[0];
3699 plane_size->chroma_size.x = 0;
3700 plane_size->chroma_size.y = 0;
3701 /* TODO: set these based on surface format */
3702 plane_size->chroma_size.width = fb->width / 2;
3703 plane_size->chroma_size.height = fb->height / 2;
3705 plane_size->chroma_pitch =
3706 fb->pitches[1] / fb->format->cpp[1];
3708 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3709 address->video_progressive.luma_addr.low_part =
3710 lower_32_bits(afb->address);
3711 address->video_progressive.luma_addr.high_part =
3712 upper_32_bits(afb->address);
3713 address->video_progressive.chroma_addr.low_part =
3714 lower_32_bits(chroma_addr);
3715 address->video_progressive.chroma_addr.high_part =
3716 upper_32_bits(chroma_addr);
3719 /* Fill GFX8 params */
3720 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3721 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3723 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3724 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3725 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3726 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3727 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3729 /* XXX fix me for VI */
3730 tiling_info->gfx8.num_banks = num_banks;
3731 tiling_info->gfx8.array_mode =
3732 DC_ARRAY_2D_TILED_THIN1;
3733 tiling_info->gfx8.tile_split = tile_split;
3734 tiling_info->gfx8.bank_width = bankw;
3735 tiling_info->gfx8.bank_height = bankh;
3736 tiling_info->gfx8.tile_aspect = mtaspect;
3737 tiling_info->gfx8.tile_mode =
3738 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3739 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3740 == DC_ARRAY_1D_TILED_THIN1) {
3741 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3744 tiling_info->gfx8.pipe_config =
3745 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3747 if (adev->asic_type == CHIP_VEGA10 ||
3748 adev->asic_type == CHIP_VEGA12 ||
3749 adev->asic_type == CHIP_VEGA20 ||
3750 adev->asic_type == CHIP_NAVI10 ||
3751 adev->asic_type == CHIP_NAVI14 ||
3752 adev->asic_type == CHIP_NAVI12 ||
3753 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3754 adev->asic_type == CHIP_SIENNA_CICHLID ||
3755 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3757 adev->asic_type == CHIP_RENOIR ||
3758 adev->asic_type == CHIP_RAVEN) {
3759 /* Fill GFX9 params */
3760 tiling_info->gfx9.num_pipes =
3761 adev->gfx.config.gb_addr_config_fields.num_pipes;
3762 tiling_info->gfx9.num_banks =
3763 adev->gfx.config.gb_addr_config_fields.num_banks;
3764 tiling_info->gfx9.pipe_interleave =
3765 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3766 tiling_info->gfx9.num_shader_engines =
3767 adev->gfx.config.gb_addr_config_fields.num_se;
3768 tiling_info->gfx9.max_compressed_frags =
3769 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3770 tiling_info->gfx9.num_rb_per_se =
3771 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3772 tiling_info->gfx9.swizzle =
3773 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3774 tiling_info->gfx9.shaderEnable = 1;
3776 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3777 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3778 adev->asic_type == CHIP_NAVY_FLOUNDER)
            tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
#endif
3781 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3782 plane_size, tiling_info,
                        tiling_flags, dcc, address,
                        force_disable_dcc);
        if (ret)
            return ret;
    }

    return 0;
}
3793 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3794 bool *per_pixel_alpha, bool *global_alpha,
3795 int *global_alpha_value)
3797 *per_pixel_alpha = false;
3798 *global_alpha = false;
3799 *global_alpha_value = 0xff;
    if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
        return;

3804 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3805 static const uint32_t alpha_formats[] = {
3806 DRM_FORMAT_ARGB8888,
3807 DRM_FORMAT_RGBA8888,
3808 DRM_FORMAT_ABGR8888,
        };
        uint32_t format = plane_state->fb->format->format;
        unsigned int i;
3813 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3814 if (format == alpha_formats[i]) {
                *per_pixel_alpha = true;
                break;
            }
        }
    }
3821 if (plane_state->alpha < 0xffff) {
3822 *global_alpha = true;
        *global_alpha_value = plane_state->alpha >> 8;
    }
}
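/*
 * Example (illustrative): an overlay plane with the DRM "alpha" property set
 * to 0x8000 (out of 0xffff) yields global_alpha = true and
 * global_alpha_value = 0x8000 >> 8 = 0x80, i.e. roughly 50% opacity.
 */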
3828 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3829 const enum surface_pixel_format format,
3830 enum dc_color_space *color_space)
{
    bool full_range;

    *color_space = COLOR_SPACE_SRGB;

3836 /* DRM color properties only affect non-RGB formats. */
    if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
        return 0;

3840 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3842 switch (plane_state->color_encoding) {
    case DRM_COLOR_YCBCR_BT601:
        if (full_range)
            *color_space = COLOR_SPACE_YCBCR601;
        else
            *color_space = COLOR_SPACE_YCBCR601_LIMITED;
        break;

    case DRM_COLOR_YCBCR_BT709:
        if (full_range)
            *color_space = COLOR_SPACE_YCBCR709;
        else
            *color_space = COLOR_SPACE_YCBCR709_LIMITED;
        break;

    case DRM_COLOR_YCBCR_BT2020:
        if (full_range)
            *color_space = COLOR_SPACE_2020_YCBCR;
        else
            return -EINVAL;
        break;

    default:
        return -EINVAL;
    }

    return 0;
}
3872 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3873 const struct drm_plane_state *plane_state,
3874 const uint64_t tiling_flags,
3875 struct dc_plane_info *plane_info,
3876 struct dc_plane_address *address,
                bool tmz_surface,
                bool force_disable_dcc)
3880 const struct drm_framebuffer *fb = plane_state->fb;
3881 const struct amdgpu_framebuffer *afb =
3882 to_amdgpu_framebuffer(plane_state->fb);
3883 struct drm_format_name_buf format_name;
3886 memset(plane_info, 0, sizeof(*plane_info));
    switch (fb->format->format) {
    case DRM_FORMAT_C8:
        plane_info->format =
            SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
        break;
    case DRM_FORMAT_RGB565:
        plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
        break;
    case DRM_FORMAT_XRGB8888:
    case DRM_FORMAT_ARGB8888:
        plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
        break;
    case DRM_FORMAT_XRGB2101010:
    case DRM_FORMAT_ARGB2101010:
        plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
        break;
    case DRM_FORMAT_XBGR2101010:
    case DRM_FORMAT_ABGR2101010:
        plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
        break;
    case DRM_FORMAT_XBGR8888:
    case DRM_FORMAT_ABGR8888:
        plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
        break;
    case DRM_FORMAT_NV21:
        plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
        break;
    case DRM_FORMAT_NV12:
        plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
        break;
    case DRM_FORMAT_P010:
        plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
        break;
    case DRM_FORMAT_XRGB16161616F:
    case DRM_FORMAT_ARGB16161616F:
        plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
        break;
    case DRM_FORMAT_XBGR16161616F:
    case DRM_FORMAT_ABGR16161616F:
        plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
        break;
    default:
        DRM_ERROR(
            "Unsupported screen format %s\n",
            drm_get_format_name(fb->format->format, &format_name));
        return -EINVAL;
    }
    switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
    case DRM_MODE_ROTATE_0:
        plane_info->rotation = ROTATION_ANGLE_0;
        break;
    case DRM_MODE_ROTATE_90:
        plane_info->rotation = ROTATION_ANGLE_90;
        break;
    case DRM_MODE_ROTATE_180:
        plane_info->rotation = ROTATION_ANGLE_180;
        break;
    case DRM_MODE_ROTATE_270:
        plane_info->rotation = ROTATION_ANGLE_270;
        break;
    default:
        plane_info->rotation = ROTATION_ANGLE_0;
        break;
    }
3954 plane_info->visible = true;
3955 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3957 plane_info->layer_index = 0;
3959 ret = fill_plane_color_attributes(plane_state, plane_info->format,
                      &plane_info->color_space);
    if (ret)
        return ret;
3964 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3965 plane_info->rotation, tiling_flags,
3966 &plane_info->tiling_info,
3967 &plane_info->plane_size,
                       &plane_info->dcc, address, tmz_surface,
                       force_disable_dcc);
    if (ret)
        return ret;
3973 fill_blending_from_plane_state(
3974 plane_state, &plane_info->per_pixel_alpha,
        &plane_info->global_alpha, &plane_info->global_alpha_value);

    return 0;
}
3980 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3981 struct dc_plane_state *dc_plane_state,
3982 struct drm_plane_state *plane_state,
3983 struct drm_crtc_state *crtc_state)
3985 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3986 const struct amdgpu_framebuffer *amdgpu_fb =
3987 to_amdgpu_framebuffer(plane_state->fb);
3988 struct dc_scaling_info scaling_info;
3989 struct dc_plane_info plane_info;
3990 uint64_t tiling_flags;
3992 bool tmz_surface = false;
3993 bool force_disable_dcc = false;
    ret = fill_dc_scaling_info(plane_state, &scaling_info);
    if (ret)
        return ret;
3999 dc_plane_state->src_rect = scaling_info.src_rect;
4000 dc_plane_state->dst_rect = scaling_info.dst_rect;
4001 dc_plane_state->clip_rect = scaling_info.clip_rect;
4002 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
    ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
    if (ret)
        return ret;
4008 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4009 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
                      &plane_info,
                      &dc_plane_state->address,
                      tmz_surface,
                      force_disable_dcc);
    if (ret)
        return ret;
4017 dc_plane_state->format = plane_info.format;
4018 dc_plane_state->color_space = plane_info.color_space;
4020 dc_plane_state->plane_size = plane_info.plane_size;
4021 dc_plane_state->rotation = plane_info.rotation;
4022 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4023 dc_plane_state->stereo_format = plane_info.stereo_format;
4024 dc_plane_state->tiling_info = plane_info.tiling_info;
4025 dc_plane_state->visible = plane_info.visible;
4026 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4027 dc_plane_state->global_alpha = plane_info.global_alpha;
4028 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4029 dc_plane_state->dcc = plane_info.dcc;
4030 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
    /*
     * Always set input transfer function, since plane state is refreshed
     * per commit.
     */
    ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
    if (ret)
        return ret;

    return 0;
}
4043 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4044 const struct dm_connector_state *dm_state,
4045 struct dc_stream_state *stream)
4047 enum amdgpu_rmx_type rmx_type;
4049 struct rect src = { 0 }; /* viewport in composition space*/
4050 struct rect dst = { 0 }; /* stream addressable area */
    /* no mode. nothing to be done */
    if (!mode)
        return;
4056 /* Full screen scaling by default */
4057 src.width = mode->hdisplay;
4058 src.height = mode->vdisplay;
4059 dst.width = stream->timing.h_addressable;
4060 dst.height = stream->timing.v_addressable;
4063 rmx_type = dm_state->scaling;
4064 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4065 if (src.width * dst.height <
4066 src.height * dst.width) {
4067 /* height needs less upscaling/more downscaling */
4068 dst.width = src.width *
4069 dst.height / src.height;
4071 /* width needs less upscaling/more downscaling */
4072 dst.height = src.height *
4073 dst.width / src.width;
    } else if (rmx_type == RMX_CENTER) {
        dst = src;
    }
4079 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4080 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4082 if (dm_state->underscan_enable) {
4083 dst.x += dm_state->underscan_hborder / 2;
4084 dst.y += dm_state->underscan_vborder / 2;
4085 dst.width -= dm_state->underscan_hborder;
        dst.height -= dm_state->underscan_vborder;
    }

    stream->src = src;
    stream->dst = dst;

4093 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
            dst.x, dst.y, dst.width, dst.height);
}
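/*
 * Worked example (illustrative): a 1280x720 mode with RMX_ASPECT scaling on
 * a 1920x1200 panel keeps the 16:9 ratio by reducing dst to 1920x1080 and
 * centering it with dst.y = (1200 - 1080) / 2 = 60.
 */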
4098 static enum dc_color_depth
4099 convert_color_depth_from_display_info(const struct drm_connector *connector,
4100 bool is_y420, int requested_bpc)
{
    uint8_t bpc;

    if (is_y420) {
        bpc = 8;

        /* Cap display bpc based on HDMI 2.0 HF-VSDB */
        if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
            bpc = 16;
        else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
            bpc = 12;
        else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
            bpc = 10;
    } else {
        bpc = (uint8_t)connector->display_info.bpc;
        /* Assume 8 bpc by default if no bpc is specified. */
        bpc = bpc ? bpc : 8;
    }
4120 if (requested_bpc > 0) {
4122 * Cap display bpc based on the user requested value.
         * The value for state->max_bpc may not be correctly updated
4125 * depending on when the connector gets added to the state
4126 * or if this was called outside of atomic check, so it
4127 * can't be used directly.
4129 bpc = min_t(u8, bpc, requested_bpc);
4131 /* Round down to the nearest even number. */
        bpc = bpc - (bpc & 1);
    }
    switch (bpc) {
    case 0:
        /*
         * Temporary Work around, DRM doesn't parse color depth for
         * EDID revision before 1.4
         * TODO: Fix edid parsing
         */
        return COLOR_DEPTH_888;
    case 6:
        return COLOR_DEPTH_666;
    case 8:
        return COLOR_DEPTH_888;
    case 10:
        return COLOR_DEPTH_101010;
    case 12:
        return COLOR_DEPTH_121212;
    case 14:
        return COLOR_DEPTH_141414;
    case 16:
        return COLOR_DEPTH_161616;
    default:
        return COLOR_DEPTH_UNDEFINED;
    }
}
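/*
 * Example (illustrative): a sink reporting 12 bpc with a user-requested
 * max_bpc of 10 is capped to min(12, 10) = 10 and returns
 * COLOR_DEPTH_101010; an odd request such as 11 is first rounded down to 10.
 */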
4160 static enum dc_aspect_ratio
4161 get_aspect_ratio(const struct drm_display_mode *mode_in)
4163 /* 1-1 mapping, since both enums follow the HDMI spec. */
4164 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4167 static enum dc_color_space
4168 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4170 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4172 switch (dc_crtc_timing->pixel_encoding) {
4173 case PIXEL_ENCODING_YCBCR422:
4174 case PIXEL_ENCODING_YCBCR444:
4175 case PIXEL_ENCODING_YCBCR420:
        /*
         * 27030khz is the separation point between HDTV and SDTV;
         * according to the HDMI spec, we use YCbCr709 and YCbCr601
         * respectively.
         */
4182 if (dc_crtc_timing->pix_clk_100hz > 270300) {
            if (dc_crtc_timing->flags.Y_ONLY)
                color_space =
                    COLOR_SPACE_YCBCR709_LIMITED;
            else
                color_space = COLOR_SPACE_YCBCR709;
        } else {
            if (dc_crtc_timing->flags.Y_ONLY)
                color_space =
                    COLOR_SPACE_YCBCR601_LIMITED;
            else
                color_space = COLOR_SPACE_YCBCR601;
        }
        break;
4198 case PIXEL_ENCODING_RGB:
        color_space = COLOR_SPACE_SRGB;
        break;

    default:
        WARN_ON(1);
        break;
    }

    return color_space;
}
4210 static bool adjust_colour_depth_from_display_info(
4211 struct dc_crtc_timing *timing_out,
4212 const struct drm_display_info *info)
{
    enum dc_color_depth depth = timing_out->display_color_depth;
    int normalized_clk;

    do {
        normalized_clk = timing_out->pix_clk_100hz / 10;
        /* YCbCr 4:2:0 requires an additional adjustment of 1/2 */
        if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
            normalized_clk /= 2;
        /* Adjust pix clock per the HDMI spec based on colour depth */
        switch (depth) {
        case COLOR_DEPTH_888:
            break;
        case COLOR_DEPTH_101010:
            normalized_clk = (normalized_clk * 30) / 24;
            break;
        case COLOR_DEPTH_121212:
            normalized_clk = (normalized_clk * 36) / 24;
            break;
        case COLOR_DEPTH_161616:
            normalized_clk = (normalized_clk * 48) / 24;
            break;
        default:
            /* The above depths are the only ones valid for HDMI. */
            return false;
        }
        if (normalized_clk <= info->max_tmds_clock) {
            timing_out->display_color_depth = depth;
            return true;
        }
    } while (--depth > COLOR_DEPTH_666);

    return false;
}
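/*
 * Worked example (illustrative): a 594 MHz 4K@60 HDMI mode at 12 bpc needs
 * 594000 * 36 / 24 = 891000 kHz, and 742500 kHz at 10 bpc, both above a
 * 600000 kHz max_tmds_clock; the loop therefore falls back to 8 bpc, where
 * 594000 kHz fits.
 */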
4246 static void fill_stream_properties_from_drm_display_mode(
4247 struct dc_stream_state *stream,
4248 const struct drm_display_mode *mode_in,
4249 const struct drm_connector *connector,
4250 const struct drm_connector_state *connector_state,
4251 const struct dc_stream_state *old_stream,
    int requested_bpc)
{
    struct dc_crtc_timing *timing_out = &stream->timing;
4255 const struct drm_display_info *info = &connector->display_info;
4256 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4257 struct hdmi_vendor_infoframe hv_frame;
4258 struct hdmi_avi_infoframe avi_frame;
4260 memset(&hv_frame, 0, sizeof(hv_frame));
4261 memset(&avi_frame, 0, sizeof(avi_frame));
4263 timing_out->h_border_left = 0;
4264 timing_out->h_border_right = 0;
4265 timing_out->v_border_top = 0;
4266 timing_out->v_border_bottom = 0;
4267 /* TODO: un-hardcode */
4268 if (drm_mode_is_420_only(info, mode_in)
4269 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4270 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4271 else if (drm_mode_is_420_also(info, mode_in)
4272 && aconnector->force_yuv420_output)
4273 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4274 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4275 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4276 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4278 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4280 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4281 timing_out->display_color_depth = convert_color_depth_from_display_info(
        connector,
        (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
        requested_bpc);
4285 timing_out->scan_type = SCANNING_TYPE_NODATA;
4286 timing_out->hdmi_vic = 0;
4289 timing_out->vic = old_stream->timing.vic;
4290 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4291 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4293 timing_out->vic = drm_match_cea_mode(mode_in);
4294 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4295 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4296 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4297 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4300 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4301 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4302 timing_out->vic = avi_frame.video_code;
4303 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4304 timing_out->hdmi_vic = hv_frame.vic;
4307 timing_out->h_addressable = mode_in->crtc_hdisplay;
4308 timing_out->h_total = mode_in->crtc_htotal;
4309 timing_out->h_sync_width =
4310 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4311 timing_out->h_front_porch =
4312 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4313 timing_out->v_total = mode_in->crtc_vtotal;
4314 timing_out->v_addressable = mode_in->crtc_vdisplay;
4315 timing_out->v_front_porch =
4316 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4317 timing_out->v_sync_width =
4318 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4319 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4320 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4322 stream->output_color_space = get_output_color_space(timing_out);
4324 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4325 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4326 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4327 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4328 drm_mode_is_420_also(info, mode_in) &&
4329 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4330 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4331 adjust_colour_depth_from_display_info(timing_out, info);
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}
static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}
static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}
static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}
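/*
 * Build a dc_stream_state for the given connector and mode. When the
 * connector has no dc_sink (e.g. headless or forced), a fake virtual
 * sink is created so the stream can still be constructed; the local
 * sink reference taken here is released before returning.
 */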
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * should decide stream support vsc sdp colorimetry capability
		 * before building vsc info packet
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);


	__drm_atomic_helper_crtc_destroy_state(state);


	kfree(state);
}
static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}
static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_params = cur->vrr_params;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: duplicate dc_stream once the stream object is flattened */

	return &state->base;
}
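/*
 * The VUPDATE interrupt is only needed while variable refresh rate is
 * active; dm_set_vblank() below enables it alongside VBLANK in that
 * case and keeps it off otherwise.
 */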
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}
int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In the case of a headless boot with force-on for a DP managed
	 * connector, these settings must be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
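/*
 * Create a stream and run it through DC validation, retrying at
 * progressively lower color depths (two bpc at a time, down to 6 bpc)
 * when validation fails; the starting depth comes from the connector's
 * max_requested_bpc property.
 */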
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	return stream;
}
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */

	return result;
}
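/*
 * Pack the connector's HDR output metadata into a DC info packet: a
 * 30 byte HDMI Dynamic Range and Mastering infoframe (4 byte header
 * plus 26 bytes of static metadata), re-framed as an HDMI infoframe
 * or as a DP/eDP SDP depending on the connector type.
 */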
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, larger display in FB console mode, larger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes will be missing after the user starts lightdm. So we
	 * need to renew the modes list in the get_modes callback, not just
	 * return the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = new_crtc_state->crtc->dev;
	struct drm_plane *plane;

	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			return true;
	}

	return false;
}
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}
static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
					 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);
}
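/*
 * Atomic check for the CRTC: reject a commit that would leave only a
 * cursor plane enabled (the hardware cannot scan out a cursor without
 * at least one real plane active), then let DC validate the stream.
 */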
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	dm_update_crtc_active_planes(crtc, state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
	if (state->enable && state->active &&
	    does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
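/*
 * After DSC enablement is decided per-stream, recompute PBN and VCPI
 * slot counts for MST connectors: streams with DSC enabled use the
 * compressed bits_per_pixel from the DSC config when converting clock
 * to PBN.
 */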
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}
static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}
static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
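/*
 * Pin the framebuffer BO (and map it into GART) under a TTM
 * reservation so its GPU address is stable for scanout; when the DC
 * plane state changed, the buffer attributes for it are refilled
 * afterwards from the BO's tiling flags and TMZ state.
 */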
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint64_t tiling_flags;
	uint32_t domain;
	int r;
	bool tmz_surface = false;
	bool force_disable_dcc = false;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

	tmz_surface = amdgpu_bo_encrypted(rbo);

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			tiling_flags, &plane_state->tiling_info,
			&plane_state->plane_size, &plane_state->dcc,
			&plane_state->address, tmz_surface,
			force_disable_dcc);
	}

	return 0;
}
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}
static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	int max_downscale = 0;
	int max_upscale = INT_MAX;

	/* TODO: These should be checked against DC plane caps */
	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, max_downscale, max_upscale, true, true);
}
static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}
static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res;
	unsigned int supported_rotations;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
					   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}
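/*
 * Each CRTC gets its own cursor plane, allocated here, in addition to
 * the primary plane passed in by the caller.
 */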
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}
static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode. Modes that appear later in the
		 * probed mode list could be of a higher, preferred
		 * resolution: for example, 3840x2160 in the base EDID
		 * preferred timing, with a 4096x2160 preferred resolution in
		 * a DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
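/*
 * Each DC link gets a dedicated i2c adapter that forwards transfers to
 * the DDC channel owned by that link's ddc_service.
 */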
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}
6443 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6445 switch (adev->mode_info.num_crtc) {
6462 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6463 struct amdgpu_encoder *aencoder,
6464 uint32_t link_index)
6466 struct amdgpu_device *adev = dev->dev_private;
6468 int res = drm_encoder_init(dev,
6470 &amdgpu_dm_encoder_funcs,
6471 DRM_MODE_ENCODER_TMDS,
6474 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6477 aencoder->encoder_id = link_index;
6479 aencoder->encoder_id = -1;
6481 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6486 static void manage_dm_interrupts(struct amdgpu_device *adev,
6487 struct amdgpu_crtc *acrtc,
6491 * We have no guarantee that the frontend index maps to the same
6492 * backend index - some even map to more than one.
6494 * TODO: Use a different interrupt or check DC itself for the mapping.
6497 amdgpu_display_crtc_idx_to_irq_type(
6502 drm_crtc_vblank_on(&acrtc->base);
6505 &adev->pageflip_irq,
6511 &adev->pageflip_irq,
6513 drm_crtc_vblank_off(&acrtc->base);
6517 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6518 struct amdgpu_crtc *acrtc)
6521 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6524 * This reads the current state for the IRQ and forcibly reapplies
6525 * the setting to the hardware.
6527 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6531 is_scaling_state_different(const struct dm_connector_state *dm_state,
6532 const struct dm_connector_state *old_dm_state)
6534 if (dm_state->scaling != old_dm_state->scaling)
6536 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6537 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6539 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6540 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6542 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6543 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
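/*
 * Worked example for the underscan checks above: disabling underscan
 * with old borders (16, 16) reports a visible change, while disabling
 * it with old borders (16, 0) does not, since the && requires both
 * borders to have been non-zero. Border changes while underscan stays
 * enabled are caught by the final comparison.
 */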
6548 #ifdef CONFIG_DRM_AMD_DC_HDCP
6549 static bool is_content_protection_different(struct drm_connector_state *state,
6550 const struct drm_connector_state *old_state,
6551 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6553 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6555 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6556 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6557 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6561 /* CP is being re-enabled, ignore this */
6562 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6563 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6564 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6568 /* S3 resume case: the old state is always 0 (UNDESIRED) while the restored state is ENABLED, so downgrade to DESIRED */
6569 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6570 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6571 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6573 /* Check if something is connected or enabled; otherwise we would start HDCP
6574 * while nothing is connected/enabled (hot-plug, headless S3, DPMS)
6576 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6577 aconnector->dc_sink != NULL)
6580 if (old_state->content_protection == state->content_protection)
6583 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
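/*
 * Summary of the transitions handled above (requested value -> effect),
 * as a quick reference sketch:
 *
 *	hdcp_content_type changed, state != UNDESIRED -> force DESIRED, update
 *	ENABLED -> DESIRED (re-enable)                -> keep ENABLED, no update
 *	UNDESIRED -> ENABLED (S3 restore)             -> downgrade to DESIRED first
 *	DESIRED, DPMS on, sink present (hot-plug)     -> update
 *	old == new                                    -> no update
 *	UNDESIRED                                     -> update (disable CP)
 */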
6590 static void remove_stream(struct amdgpu_device *adev,
6591 struct amdgpu_crtc *acrtc,
6592 struct dc_stream_state *stream)
6594 /* this is the update mode case */
6596 acrtc->otg_inst = -1;
6597 acrtc->enabled = false;
6600 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6601 struct dc_cursor_position *position)
6603 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6605 int xorigin = 0, yorigin = 0;
6607 position->enable = false;
6611 if (!crtc || !plane->state->fb)
6614 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6615 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6616 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6618 plane->state->crtc_w,
6619 plane->state->crtc_h);
6623 x = plane->state->crtc_x;
6624 y = plane->state->crtc_y;
6626 if (x <= -amdgpu_crtc->max_cursor_width ||
6627 y <= -amdgpu_crtc->max_cursor_height)
6631 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6635 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6638 position->enable = true;
6639 position->translate_by_source = true;
6642 position->x_hotspot = xorigin;
6643 position->y_hotspot = yorigin;
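/*
 * Worked example for the clamping above (assuming max_cursor_width is
 * 128): a cursor at crtc_x = -12 gives xorigin = min(12, 127) = 12, so
 * x_hotspot = 12 and the visible part of the cursor starts at screen
 * x = 0. A cursor at crtc_x <= -128 is entirely off-screen and stays
 * disabled.
 */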
6648 static void handle_cursor_update(struct drm_plane *plane,
6649 struct drm_plane_state *old_plane_state)
6651 struct amdgpu_device *adev = plane->dev->dev_private;
6652 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6653 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6654 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6655 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6656 uint64_t address = afb ? afb->address : 0;
6657 struct dc_cursor_position position;
6658 struct dc_cursor_attributes attributes;
6661 if (!plane->state->fb && !old_plane_state->fb)
6664 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6666 amdgpu_crtc->crtc_id,
6667 plane->state->crtc_w,
6668 plane->state->crtc_h);
6670 ret = get_cursor_position(plane, crtc, &position);
6674 if (!position.enable) {
6675 /* turn off cursor */
6676 if (crtc_state && crtc_state->stream) {
6677 mutex_lock(&adev->dm.dc_lock);
6678 dc_stream_set_cursor_position(crtc_state->stream,
6680 mutex_unlock(&adev->dm.dc_lock);
6685 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6686 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6688 memset(&attributes, 0, sizeof(attributes));
6689 attributes.address.high_part = upper_32_bits(address);
6690 attributes.address.low_part = lower_32_bits(address);
6691 attributes.width = plane->state->crtc_w;
6692 attributes.height = plane->state->crtc_h;
6693 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6694 attributes.rotation_angle = 0;
6695 attributes.attribute_flags.value = 0;
6697 attributes.pitch = attributes.width;
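/*
 * The 64-bit cursor surface address is split into the two 32-bit
 * register halves above: e.g. address 0x123456789 yields
 * high_part = 0x1 and low_part = 0x23456789.
 */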
6699 if (crtc_state->stream) {
6700 mutex_lock(&adev->dm.dc_lock);
6701 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6703 DRM_ERROR("DC failed to set cursor attributes\n");
6705 if (!dc_stream_set_cursor_position(crtc_state->stream,
6707 DRM_ERROR("DC failed to set cursor position\n");
6708 mutex_unlock(&adev->dm.dc_lock);
6712 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6715 assert_spin_locked(&acrtc->base.dev->event_lock);
6716 WARN_ON(acrtc->event);
6718 acrtc->event = acrtc->base.state->event;
6720 /* Set the flip status */
6721 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6723 /* Mark this event as consumed */
6724 acrtc->base.state->event = NULL;
6726 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6730 static void update_freesync_state_on_stream(
6731 struct amdgpu_display_manager *dm,
6732 struct dm_crtc_state *new_crtc_state,
6733 struct dc_stream_state *new_stream,
6734 struct dc_plane_state *surface,
6735 u32 flip_timestamp_in_us)
6737 struct mod_vrr_params vrr_params;
6738 struct dc_info_packet vrr_infopacket = {0};
6739 struct amdgpu_device *adev = dm->adev;
6740 unsigned long flags;
6746 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6747 * For now it's sufficient to just guard against these conditions.
6750 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6753 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6754 vrr_params = new_crtc_state->vrr_params;
6757 mod_freesync_handle_preflip(
6758 dm->freesync_module,
6761 flip_timestamp_in_us,
6764 if (adev->family < AMDGPU_FAMILY_AI &&
6765 amdgpu_dm_vrr_active(new_crtc_state)) {
6766 mod_freesync_handle_v_update(dm->freesync_module,
6767 new_stream, &vrr_params);
6769 /* Need to call this before the frame ends. */
6770 dc_stream_adjust_vmin_vmax(dm->dc,
6771 new_crtc_state->stream,
6772 &vrr_params.adjust);
6776 mod_freesync_build_vrr_infopacket(
6777 dm->freesync_module,
6781 TRANSFER_FUNC_UNKNOWN,
6784 new_crtc_state->freesync_timing_changed |=
6785 (memcmp(&new_crtc_state->vrr_params.adjust,
6787 sizeof(vrr_params.adjust)) != 0);
6789 new_crtc_state->freesync_vrr_info_changed |=
6790 (memcmp(&new_crtc_state->vrr_infopacket,
6792 sizeof(vrr_infopacket)) != 0);
6794 new_crtc_state->vrr_params = vrr_params;
6795 new_crtc_state->vrr_infopacket = vrr_infopacket;
6797 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6798 new_stream->vrr_infopacket = vrr_infopacket;
6800 if (new_crtc_state->freesync_vrr_info_changed)
6801 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6802 new_crtc_state->base.crtc->base.id,
6803 (int)new_crtc_state->base.vrr_enabled,
6804 (int)vrr_params.state);
6806 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6809 static void pre_update_freesync_state_on_stream(
6810 struct amdgpu_display_manager *dm,
6811 struct dm_crtc_state *new_crtc_state)
6813 struct dc_stream_state *new_stream = new_crtc_state->stream;
6814 struct mod_vrr_params vrr_params;
6815 struct mod_freesync_config config = new_crtc_state->freesync_config;
6816 struct amdgpu_device *adev = dm->adev;
6817 unsigned long flags;
6823 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6824 * For now it's sufficient to just guard against these conditions.
6826 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6829 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6830 vrr_params = new_crtc_state->vrr_params;
6832 if (new_crtc_state->vrr_supported &&
6833 config.min_refresh_in_uhz &&
6834 config.max_refresh_in_uhz) {
6835 config.state = new_crtc_state->base.vrr_enabled ?
6836 VRR_STATE_ACTIVE_VARIABLE :
6839 config.state = VRR_STATE_UNSUPPORTED;
6842 mod_freesync_build_vrr_params(dm->freesync_module,
6844 &config, &vrr_params);
6846 new_crtc_state->freesync_timing_changed |=
6847 (memcmp(&new_crtc_state->vrr_params.adjust,
6849 sizeof(vrr_params.adjust)) != 0);
6851 new_crtc_state->vrr_params = vrr_params;
6852 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6855 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6856 struct dm_crtc_state *new_state)
6858 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6859 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6861 if (!old_vrr_active && new_vrr_active) {
6862 /* Transition VRR inactive -> active:
6863 * While VRR is active, we must not disable vblank irq, as a
6864 * re-enable after disable would compute bogus vblank/pflip
6865 * timestamps if the re-enable happened inside the display front porch.
6867 * We also need the vupdate irq for the actual core vblank handling at the end of vblank.
6870 dm_set_vupdate_irq(new_state->base.crtc, true);
6871 drm_crtc_vblank_get(new_state->base.crtc);
6872 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6873 __func__, new_state->base.crtc->base.id);
6874 } else if (old_vrr_active && !new_vrr_active) {
6875 /* Transition VRR active -> inactive:
6876 * Allow vblank irq disable again for fixed refresh rate.
6878 dm_set_vupdate_irq(new_state->base.crtc, false);
6879 drm_crtc_vblank_put(new_state->base.crtc);
6880 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6881 __func__, new_state->base.crtc->base.id);
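/*
 * Each off->on transition above takes exactly one vblank reference and
 * enables the vupdate irq; the matching on->off transition drops both
 * again, so drm_crtc_vblank_get()/drm_crtc_vblank_put() stay balanced
 * across any sequence of VRR toggles.
 */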
6885 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6887 struct drm_plane *plane;
6888 struct drm_plane_state *old_plane_state, *new_plane_state;
6892 * TODO: Make this per-stream so we don't issue redundant updates for
6893 * commits with multiple streams.
6895 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6897 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6898 handle_cursor_update(plane, old_plane_state);
6901 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6902 struct dc_state *dc_state,
6903 struct drm_device *dev,
6904 struct amdgpu_display_manager *dm,
6905 struct drm_crtc *pcrtc,
6906 bool wait_for_vblank)
6909 uint64_t timestamp_ns;
6910 struct drm_plane *plane;
6911 struct drm_plane_state *old_plane_state, *new_plane_state;
6912 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6913 struct drm_crtc_state *new_pcrtc_state =
6914 drm_atomic_get_new_crtc_state(state, pcrtc);
6915 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6916 struct dm_crtc_state *dm_old_crtc_state =
6917 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6918 int planes_count = 0, vpos, hpos;
6920 unsigned long flags;
6921 struct amdgpu_bo *abo;
6922 uint64_t tiling_flags;
6923 bool tmz_surface = false;
6924 uint32_t target_vblank, last_flip_vblank;
6925 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6926 bool pflip_present = false;
6928 struct dc_surface_update surface_updates[MAX_SURFACES];
6929 struct dc_plane_info plane_infos[MAX_SURFACES];
6930 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6931 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6932 struct dc_stream_update stream_update;
6935 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6938 dm_error("Failed to allocate update bundle\n");
6943 * Disable the cursor first if we're disabling all the planes.
6944 * It'll remain on the screen after the planes are re-enabled if we don't.
6947 if (acrtc_state->active_planes == 0)
6948 amdgpu_dm_commit_cursors(state);
6950 /* update planes when needed */
6951 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6952 struct drm_crtc *crtc = new_plane_state->crtc;
6953 struct drm_crtc_state *new_crtc_state;
6954 struct drm_framebuffer *fb = new_plane_state->fb;
6955 bool plane_needs_flip;
6956 struct dc_plane_state *dc_plane;
6957 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6959 /* Cursor plane is handled after stream updates */
6960 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6963 if (!fb || !crtc || pcrtc != crtc)
6966 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6967 if (!new_crtc_state->active)
6970 dc_plane = dm_new_plane_state->dc_state;
6972 bundle->surface_updates[planes_count].surface = dc_plane;
6973 if (new_pcrtc_state->color_mgmt_changed) {
6974 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6975 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6976 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6979 fill_dc_scaling_info(new_plane_state,
6980 &bundle->scaling_infos[planes_count]);
6982 bundle->surface_updates[planes_count].scaling_info =
6983 &bundle->scaling_infos[planes_count];
6985 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6987 pflip_present = pflip_present || plane_needs_flip;
6989 if (!plane_needs_flip) {
6994 abo = gem_to_amdgpu_bo(fb->obj[0]);
6997 * Wait for all fences on this FB. Do limited wait to avoid
6998 * deadlock during GPU reset when this fence will not signal
6999 * but we hold reservation lock for the BO.
7001 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7003 msecs_to_jiffies(5000));
7004 if (unlikely(r <= 0))
7005 DRM_ERROR("Waiting for fences timed out!");
7008 * TODO: This might fail and hence is better not used; wait
7009 * explicitly on the fences instead.
7010 * In general this should only be called for
7011 * a blocking commit, as per the framework helpers.
7013 r = amdgpu_bo_reserve(abo, true);
7014 if (unlikely(r != 0))
7015 DRM_ERROR("failed to reserve buffer before flip\n");
7017 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
7019 tmz_surface = amdgpu_bo_encrypted(abo);
7021 amdgpu_bo_unreserve(abo);
7023 fill_dc_plane_info_and_addr(
7024 dm->adev, new_plane_state, tiling_flags,
7025 &bundle->plane_infos[planes_count],
7026 &bundle->flip_addrs[planes_count].address,
7030 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7031 new_plane_state->plane->index,
7032 bundle->plane_infos[planes_count].dcc.enable);
7034 bundle->surface_updates[planes_count].plane_info =
7035 &bundle->plane_infos[planes_count];
7038 * Only allow immediate flips for fast updates that don't
7039 * change the FB pitch, DCC state, rotation or mirroring.
7041 bundle->flip_addrs[planes_count].flip_immediate =
7042 crtc->state->async_flip &&
7043 acrtc_state->update_type == UPDATE_TYPE_FAST;
7045 timestamp_ns = ktime_get_ns();
7046 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7047 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7048 bundle->surface_updates[planes_count].surface = dc_plane;
7050 if (!bundle->surface_updates[planes_count].surface) {
7051 DRM_ERROR("No surface for CRTC: id=%d\n",
7052 acrtc_attach->crtc_id);
7056 if (plane == pcrtc->primary)
7057 update_freesync_state_on_stream(
7060 acrtc_state->stream,
7062 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7064 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7066 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7067 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7073 if (pflip_present) {
7075 /* Use old throttling in non-vrr fixed refresh rate mode
7076 * to keep flip scheduling based on target vblank counts
7077 * working in a backwards compatible way, e.g., for
7078 * clients using the GLX_OML_sync_control extension or
7079 * DRI3/Present extension with defined target_msc.
7081 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7084 /* For variable refresh rate mode only:
7085 * Get vblank of last completed flip to avoid > 1 vrr
7086 * flips per video frame by use of throttling, but allow
7087 * flip programming anywhere in the possibly large
7088 * variable vrr vblank interval for fine-grained flip
7089 * timing control and more opportunity to avoid stutter
7090 * on late submission of flips.
7092 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7093 last_flip_vblank = acrtc_attach->last_flip_vblank;
7094 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7097 target_vblank = last_flip_vblank + wait_for_vblank;
7100 * Wait until we're out of the vertical blank period before the one
7101 * targeted by the flip
7103 while ((acrtc_attach->enabled &&
7104 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7105 0, &vpos, &hpos, NULL,
7106 NULL, &pcrtc->hwmode)
7107 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7108 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7109 (int)(target_vblank -
7110 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7111 usleep_range(1000, 1100);
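/*
 * The signed difference keeps this comparison safe across counter
 * wrap-around: e.g. with target_vblank = 1 and a current count of
 * 0xffffffff, (int)(1 - 0xffffffff) evaluates to 2 > 0, so we keep
 * waiting for the two remaining vblanks as intended.
 */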
7115 * Prepare the flip event for the pageflip interrupt to handle.
7117 * This only works in the case where we've already turned on the
7118 * appropriate hardware blocks (eg. HUBP) so in the transition case
7119 * from 0 -> n planes we have to skip a hardware generated event
7120 * and rely on sending it from software.
7122 if (acrtc_attach->base.state->event &&
7123 acrtc_state->active_planes > 0) {
7124 drm_crtc_vblank_get(pcrtc);
7126 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7128 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7129 prepare_flip_isr(acrtc_attach);
7131 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7134 if (acrtc_state->stream) {
7135 if (acrtc_state->freesync_vrr_info_changed)
7136 bundle->stream_update.vrr_infopacket =
7137 &acrtc_state->stream->vrr_infopacket;
7141 /* Update the planes if changed or disable if we don't have any. */
7142 if ((planes_count || acrtc_state->active_planes == 0) &&
7143 acrtc_state->stream) {
7144 bundle->stream_update.stream = acrtc_state->stream;
7145 if (new_pcrtc_state->mode_changed) {
7146 bundle->stream_update.src = acrtc_state->stream->src;
7147 bundle->stream_update.dst = acrtc_state->stream->dst;
7150 if (new_pcrtc_state->color_mgmt_changed) {
7152 * TODO: This isn't fully correct since we've actually
7153 * already modified the stream in place.
7155 bundle->stream_update.gamut_remap =
7156 &acrtc_state->stream->gamut_remap_matrix;
7157 bundle->stream_update.output_csc_transform =
7158 &acrtc_state->stream->csc_color_matrix;
7159 bundle->stream_update.out_transfer_func =
7160 acrtc_state->stream->out_transfer_func;
7163 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7164 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7165 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7168 * If FreeSync state on the stream has changed then we need to
7169 * re-adjust the min/max bounds now that DC doesn't handle this
7170 * as part of commit.
7172 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7173 amdgpu_dm_vrr_active(acrtc_state)) {
7174 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7175 dc_stream_adjust_vmin_vmax(
7176 dm->dc, acrtc_state->stream,
7177 &acrtc_state->vrr_params.adjust);
7178 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7180 mutex_lock(&dm->dc_lock);
7181 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7182 acrtc_state->stream->link->psr_settings.psr_allow_active)
7183 amdgpu_dm_psr_disable(acrtc_state->stream);
7185 dc_commit_updates_for_stream(dm->dc,
7186 bundle->surface_updates,
7188 acrtc_state->stream,
7189 &bundle->stream_update,
7193 * Enable or disable the interrupts on the backend.
7195 * Most pipes are put into power gating when unused.
7197 * When power gating is enabled on a pipe, the interrupt enablement
7198 * state is lost by the time power gating is disabled again.
7200 * So we need to update the IRQ control state in hardware
7201 * whenever the pipe turns on (since it could be previously
7202 * power gated) or off (since some pipes can't be power gated on some ASICs).
7205 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7206 dm_update_pflip_irq_state(
7207 (struct amdgpu_device *)dev->dev_private,
7210 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7211 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7212 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7213 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7214 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7215 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7216 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7217 amdgpu_dm_psr_enable(acrtc_state->stream);
7220 mutex_unlock(&dm->dc_lock);
7224 * Update cursor state *after* programming all the planes.
7225 * This avoids redundant programming in the case where we're going
7226 * to be disabling a single plane - those pipes are being disabled.
7228 if (acrtc_state->active_planes)
7229 amdgpu_dm_commit_cursors(state);
7235 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7236 struct drm_atomic_state *state)
7238 struct amdgpu_device *adev = dev->dev_private;
7239 struct amdgpu_dm_connector *aconnector;
7240 struct drm_connector *connector;
7241 struct drm_connector_state *old_con_state, *new_con_state;
7242 struct drm_crtc_state *new_crtc_state;
7243 struct dm_crtc_state *new_dm_crtc_state;
7244 const struct dc_stream_status *status;
7247 /* Notify device removals. */
7248 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7249 if (old_con_state->crtc != new_con_state->crtc) {
7250 /* CRTC changes require notification. */
7254 if (!new_con_state->crtc)
7257 new_crtc_state = drm_atomic_get_new_crtc_state(
7258 state, new_con_state->crtc);
7260 if (!new_crtc_state)
7263 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7267 aconnector = to_amdgpu_dm_connector(connector);
7269 mutex_lock(&adev->dm.audio_lock);
7270 inst = aconnector->audio_inst;
7271 aconnector->audio_inst = -1;
7272 mutex_unlock(&adev->dm.audio_lock);
7274 amdgpu_dm_audio_eld_notify(adev, inst);
7277 /* Notify audio device additions. */
7278 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7279 if (!new_con_state->crtc)
7282 new_crtc_state = drm_atomic_get_new_crtc_state(
7283 state, new_con_state->crtc);
7285 if (!new_crtc_state)
7288 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7291 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7292 if (!new_dm_crtc_state->stream)
7295 status = dc_stream_get_status(new_dm_crtc_state->stream);
7299 aconnector = to_amdgpu_dm_connector(connector);
7301 mutex_lock(&adev->dm.audio_lock);
7302 inst = status->audio_inst;
7303 aconnector->audio_inst = inst;
7304 mutex_unlock(&adev->dm.audio_lock);
7306 amdgpu_dm_audio_eld_notify(adev, inst);
7311 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7312 * @crtc_state: the DRM CRTC state
7313 * @stream_state: the DC stream state.
7315 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7316 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7318 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7319 struct dc_stream_state *stream_state)
7321 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7324 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7325 struct drm_atomic_state *state,
7328 struct drm_crtc *crtc;
7329 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7330 struct amdgpu_device *adev = dev->dev_private;
7334 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7335 * a modeset, being disabled, or have no active planes.
7337 * It's done in atomic commit rather than commit tail for now since
7338 * some of these interrupt handlers access the current CRTC state and
7339 * potentially the stream pointer itself.
7341 * Since the atomic state is swapped within atomic commit and not within
7342 * commit tail, this would lead to the new state (that hasn't been committed yet)
7343 * being accessed from within the handlers.
7345 * TODO: Fix this so we can do this in commit tail and not have to block
7348 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7349 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7351 if (old_crtc_state->active &&
7352 (!new_crtc_state->active ||
7353 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7354 manage_dm_interrupts(adev, acrtc, false);
7357 * Add check here for SoC's that support hardware cursor plane, to
7358 * unset legacy_cursor_update
7361 return drm_atomic_helper_commit(dev, state, nonblock);
7363 /* TODO: Handle EINTR, re-enable IRQ */
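/*
 * For reference, the nonblock parameter above mirrors userspace passing
 * DRM_MODE_ATOMIC_NONBLOCK to the atomic ioctl. A minimal libdrm sketch
 * (plane_id, fb_prop_id and fb_id are placeholders the client resolved
 * beforehand):
 *
 *	drmModeAtomicReq *req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, fb_prop_id, fb_id);
 *	drmModeAtomicCommit(fd, req,
 *			    DRM_MODE_ATOMIC_NONBLOCK |
 *			    DRM_MODE_PAGE_FLIP_EVENT, NULL);
 *	drmModeAtomicFree(req);
 *
 * A blocking commit simply omits DRM_MODE_ATOMIC_NONBLOCK.
 */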
7367 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7368 * @state: The atomic state to commit
7370 * This will tell DC to commit the constructed DC state from atomic_check,
7371 * programming the hardware. Any failure here implies a hardware failure, since
7372 * atomic check should have filtered anything non-kosher.
7374 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7376 struct drm_device *dev = state->dev;
7377 struct amdgpu_device *adev = dev->dev_private;
7378 struct amdgpu_display_manager *dm = &adev->dm;
7379 struct dm_atomic_state *dm_state;
7380 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7382 struct drm_crtc *crtc;
7383 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7384 unsigned long flags;
7385 bool wait_for_vblank = true;
7386 struct drm_connector *connector;
7387 struct drm_connector_state *old_con_state, *new_con_state;
7388 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7389 int crtc_disable_count = 0;
7391 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7393 dm_state = dm_atomic_get_new_state(state);
7394 if (dm_state && dm_state->context) {
7395 dc_state = dm_state->context;
7397 /* No state changes, retain current state. */
7398 dc_state_temp = dc_create_state(dm->dc);
7399 ASSERT(dc_state_temp);
7400 dc_state = dc_state_temp;
7401 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7404 /* update changed items */
7405 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7406 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7408 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7409 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7412 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7413 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7414 "connectors_changed:%d\n",
7416 new_crtc_state->enable,
7417 new_crtc_state->active,
7418 new_crtc_state->planes_changed,
7419 new_crtc_state->mode_changed,
7420 new_crtc_state->active_changed,
7421 new_crtc_state->connectors_changed);
7423 /* Copy all transient state flags into dc state */
7424 if (dm_new_crtc_state->stream) {
7425 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7426 dm_new_crtc_state->stream);
7429 /* handles headless hotplug case, updating new_state and
7430 * aconnector as needed
7433 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7435 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7437 if (!dm_new_crtc_state->stream) {
7439 * this could happen because of issues with
7440 * userspace notifications delivery.
7441 * In this case userspace tries to set a mode on a
7442 * display which is in fact disconnected.
7443 * dc_sink is NULL in this case on aconnector.
7444 * We expect reset mode will come soon.
7446 * This can also happen when an unplug is done
7447 * during the resume sequence.
7449 * In this case, we want to pretend we still
7450 * have a sink to keep the pipe running so that
7451 * hw state is consistent with the sw state
7453 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7454 __func__, acrtc->base.base.id);
7458 if (dm_old_crtc_state->stream)
7459 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7461 pm_runtime_get_noresume(dev->dev);
7463 acrtc->enabled = true;
7464 acrtc->hw_mode = new_crtc_state->mode;
7465 crtc->hwmode = new_crtc_state->mode;
7466 } else if (modereset_required(new_crtc_state)) {
7467 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7468 /* i.e. reset mode */
7469 if (dm_old_crtc_state->stream) {
7470 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7471 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7473 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7476 } /* for_each_crtc_in_state() */
7479 dm_enable_per_frame_crtc_master_sync(dc_state);
7480 mutex_lock(&dm->dc_lock);
7481 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7482 mutex_unlock(&dm->dc_lock);
7485 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7486 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7488 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7490 if (dm_new_crtc_state->stream != NULL) {
7491 const struct dc_stream_status *status =
7492 dc_stream_get_status(dm_new_crtc_state->stream);
7495 status = dc_stream_get_status_from_state(dc_state,
7496 dm_new_crtc_state->stream);
7499 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7501 acrtc->otg_inst = status->primary_otg_inst;
7504 #ifdef CONFIG_DRM_AMD_DC_HDCP
7505 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7506 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7507 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7508 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7510 new_crtc_state = NULL;
7513 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7515 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7517 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7518 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7519 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7520 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7524 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7525 hdcp_update_display(
7526 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7527 new_con_state->hdcp_content_type,
7528 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7533 /* Handle connector state changes */
7534 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7535 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7536 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7537 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7538 struct dc_surface_update dummy_updates[MAX_SURFACES];
7539 struct dc_stream_update stream_update;
7540 struct dc_info_packet hdr_packet;
7541 struct dc_stream_status *status = NULL;
7542 bool abm_changed, hdr_changed, scaling_changed;
7544 memset(&dummy_updates, 0, sizeof(dummy_updates));
7545 memset(&stream_update, 0, sizeof(stream_update));
7548 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7549 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7552 /* Skip any modesets/resets */
7553 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7556 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7557 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7559 scaling_changed = is_scaling_state_different(dm_new_con_state,
7562 abm_changed = dm_new_crtc_state->abm_level !=
7563 dm_old_crtc_state->abm_level;
7566 is_hdr_metadata_different(old_con_state, new_con_state);
7568 if (!scaling_changed && !abm_changed && !hdr_changed)
7571 stream_update.stream = dm_new_crtc_state->stream;
7572 if (scaling_changed) {
7573 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7574 dm_new_con_state, dm_new_crtc_state->stream);
7576 stream_update.src = dm_new_crtc_state->stream->src;
7577 stream_update.dst = dm_new_crtc_state->stream->dst;
7581 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7583 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7587 fill_hdr_info_packet(new_con_state, &hdr_packet);
7588 stream_update.hdr_static_metadata = &hdr_packet;
7591 status = dc_stream_get_status(dm_new_crtc_state->stream);
7593 WARN_ON(!status->plane_count);
7596 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7597 * Here we create an empty update on each plane.
7598 * To fix this, DC should permit updating only stream properties.
7600 for (j = 0; j < status->plane_count; j++)
7601 dummy_updates[j].surface = status->plane_states[0];
7604 mutex_lock(&dm->dc_lock);
7605 dc_commit_updates_for_stream(dm->dc,
7607 status->plane_count,
7608 dm_new_crtc_state->stream,
7611 mutex_unlock(&dm->dc_lock);
7614 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7615 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7616 new_crtc_state, i) {
7617 if (old_crtc_state->active && !new_crtc_state->active)
7618 crtc_disable_count++;
7620 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7621 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7623 /* Update freesync active state. */
7624 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7626 /* Handle vrr on->off / off->on transitions */
7627 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7632 * Enable interrupts for CRTCs that are newly enabled or went through
7633 * a modeset. This is intentionally deferred until after the front-end
7634 * state is modified, so that the OTG is on and the IRQ
7635 * handlers don't access stale or invalid state.
7637 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7638 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7640 if (new_crtc_state->active &&
7641 (!old_crtc_state->active ||
7642 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7643 manage_dm_interrupts(adev, acrtc, true);
7644 #ifdef CONFIG_DEBUG_FS
7646 * Frontend may have changed so reapply the CRC capture
7647 * settings for the stream.
7649 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7651 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7652 amdgpu_dm_crtc_configure_crc_source(
7653 crtc, dm_new_crtc_state,
7654 dm_new_crtc_state->crc_src);
7660 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7661 if (new_crtc_state->async_flip)
7662 wait_for_vblank = false;
7664 /* update planes when needed per crtc*/
7665 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7666 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7668 if (dm_new_crtc_state->stream)
7669 amdgpu_dm_commit_planes(state, dc_state, dev,
7670 dm, crtc, wait_for_vblank);
7673 /* Update audio instances for each connector. */
7674 amdgpu_dm_commit_audio(dev, state);
7677 * Send a vblank event for every CRTC whose event was not handled in the
7678 * flip path, and mark the events consumed for drm_atomic_helper_commit_hw_done().
7680 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7681 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7683 if (new_crtc_state->event)
7684 drm_send_event_locked(dev, &new_crtc_state->event->base);
7686 new_crtc_state->event = NULL;
7688 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7690 /* Signal HW programming completion */
7691 drm_atomic_helper_commit_hw_done(state);
7693 if (wait_for_vblank)
7694 drm_atomic_helper_wait_for_flip_done(dev, state);
7696 drm_atomic_helper_cleanup_planes(dev, state);
7699 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7700 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
7703 for (i = 0; i < crtc_disable_count; i++)
7704 pm_runtime_put_autosuspend(dev->dev);
7705 pm_runtime_mark_last_busy(dev->dev);
7708 dc_release_state(dc_state_temp);
7712 static int dm_force_atomic_commit(struct drm_connector *connector)
7715 struct drm_device *ddev = connector->dev;
7716 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7717 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7718 struct drm_plane *plane = disconnected_acrtc->base.primary;
7719 struct drm_connector_state *conn_state;
7720 struct drm_crtc_state *crtc_state;
7721 struct drm_plane_state *plane_state;
7726 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7728 /* Construct an atomic state to restore previous display setting */
7731 * Attach connectors to drm_atomic_state
7733 conn_state = drm_atomic_get_connector_state(state, connector);
7735 ret = PTR_ERR_OR_ZERO(conn_state);
7739 /* Attach crtc to drm_atomic_state*/
7740 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7742 ret = PTR_ERR_OR_ZERO(crtc_state);
7746 /* force a restore */
7747 crtc_state->mode_changed = true;
7749 /* Attach plane to drm_atomic_state */
7750 plane_state = drm_atomic_get_plane_state(state, plane);
7752 ret = PTR_ERR_OR_ZERO(plane_state);
7757 /* Call commit internally with the state we just constructed */
7758 ret = drm_atomic_commit(state);
7763 DRM_ERROR("Restoring old state failed with %i\n", ret);
7764 drm_atomic_state_put(state);
7770 * This function handles all cases when set mode does not come upon hotplug.
7771 * This includes when a display is unplugged then plugged back into the
7772 * same port, and when running without usermode desktop manager support.
7774 void dm_restore_drm_connector_state(struct drm_device *dev,
7775 struct drm_connector *connector)
7777 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7778 struct amdgpu_crtc *disconnected_acrtc;
7779 struct dm_crtc_state *acrtc_state;
7781 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7784 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7785 if (!disconnected_acrtc)
7788 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7789 if (!acrtc_state->stream)
7793 * If the previous sink is not released and different from the current,
7794 * we deduce we are in a state where we cannot rely on a usermode call
7795 * to turn on the display, so we do it here
7797 if (acrtc_state->stream->sink != aconnector->dc_sink)
7798 dm_force_atomic_commit(&aconnector->base);
7802 * Grabs all modesetting locks to serialize against any blocking commits,
7803 * and waits for completion of all non-blocking commits.
7805 static int do_aquire_global_lock(struct drm_device *dev,
7806 struct drm_atomic_state *state)
7808 struct drm_crtc *crtc;
7809 struct drm_crtc_commit *commit;
7813 * Adding all modeset locks to acquire_ctx will
7814 * ensure that when the framework releases it, the
7815 * extra locks we are taking here will get released too.
7817 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7821 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7822 spin_lock(&crtc->commit_lock);
7823 commit = list_first_entry_or_null(&crtc->commit_list,
7824 struct drm_crtc_commit, commit_entry);
7826 drm_crtc_commit_get(commit);
7827 spin_unlock(&crtc->commit_lock);
7833 * Make sure all pending HW programming has completed and all page flips are done.
7836 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7839 ret = wait_for_completion_interruptible_timeout(
7840 &commit->flip_done, 10*HZ);
7843 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7844 "timed out\n", crtc->base.id, crtc->name);
7846 drm_crtc_commit_put(commit);
7849 return ret < 0 ? ret : 0;
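/*
 * Note the return convention of wait_for_completion_interruptible_timeout()
 * used above: a negative value means we were interrupted, 0 means the 10s
 * timeout expired, and a positive value is the remaining jiffies on
 * success - hence the ret < 0 ? ret : 0 translation.
 */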
7852 static void get_freesync_config_for_crtc(
7853 struct dm_crtc_state *new_crtc_state,
7854 struct dm_connector_state *new_con_state)
7856 struct mod_freesync_config config = {0};
7857 struct amdgpu_dm_connector *aconnector =
7858 to_amdgpu_dm_connector(new_con_state->base.connector);
7859 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7860 int vrefresh = drm_mode_vrefresh(mode);
7862 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7863 vrefresh >= aconnector->min_vfreq &&
7864 vrefresh <= aconnector->max_vfreq;
7866 if (new_crtc_state->vrr_supported) {
7867 new_crtc_state->stream->ignore_msa_timing_param = true;
7868 config.state = new_crtc_state->base.vrr_enabled ?
7869 VRR_STATE_ACTIVE_VARIABLE :
7871 config.min_refresh_in_uhz =
7872 aconnector->min_vfreq * 1000000;
7873 config.max_refresh_in_uhz =
7874 aconnector->max_vfreq * 1000000;
7875 config.vsif_supported = true;
7879 new_crtc_state->freesync_config = config;
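/*
 * Worked example: a panel advertising min_vfreq = 48 and max_vfreq = 144
 * yields a VRR window of 48,000,000..144,000,000 uHz, and a 60 Hz mode
 * (vrefresh = 60) falls inside 48..144, so vrr_supported is set and the
 * config above is handed to the freesync module.
 */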
7882 static void reset_freesync_config_for_crtc(
7883 struct dm_crtc_state *new_crtc_state)
7885 new_crtc_state->vrr_supported = false;
7887 memset(&new_crtc_state->vrr_params, 0,
7888 sizeof(new_crtc_state->vrr_params));
7889 memset(&new_crtc_state->vrr_infopacket, 0,
7890 sizeof(new_crtc_state->vrr_infopacket));
7893 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7894 struct drm_atomic_state *state,
7895 struct drm_crtc *crtc,
7896 struct drm_crtc_state *old_crtc_state,
7897 struct drm_crtc_state *new_crtc_state,
7899 bool *lock_and_validation_needed)
7901 struct dm_atomic_state *dm_state = NULL;
7902 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7903 struct dc_stream_state *new_stream;
7907 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7908 * update changed items
7910 struct amdgpu_crtc *acrtc = NULL;
7911 struct amdgpu_dm_connector *aconnector = NULL;
7912 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7913 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7917 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7918 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7919 acrtc = to_amdgpu_crtc(crtc);
7920 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7922 /* TODO This hack should go away */
7923 if (aconnector && enable) {
7924 /* Make sure fake sink is created in plug-in scenario */
7925 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7927 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7930 if (IS_ERR(drm_new_conn_state)) {
7931 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7935 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7936 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7938 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7941 new_stream = create_validate_stream_for_sink(aconnector,
7942 &new_crtc_state->mode,
7944 dm_old_crtc_state->stream);
7947 * We can have no stream on ACTION_SET if a display
7948 * was disconnected during S3; in this case it is not an
7949 * error, since the OS will be updated after detection and
7950 * will do the right thing on the next atomic commit.
7954 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7955 __func__, acrtc->base.base.id);
7960 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7962 ret = fill_hdr_info_packet(drm_new_conn_state,
7963 &new_stream->hdr_static_metadata);
7968 * If we already removed the old stream from the context
7969 * (and set the new stream to NULL) then we can't reuse
7970 * the old stream even if the stream and scaling are unchanged.
7971 * We'll hit the BUG_ON and black screen.
7973 * TODO: Refactor this function to allow this check to work
7974 * in all conditions.
7976 if (dm_new_crtc_state->stream &&
7977 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7978 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7979 new_crtc_state->mode_changed = false;
7980 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7981 new_crtc_state->mode_changed);
7985 /* mode_changed flag may get updated above, need to check again */
7986 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7990 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7991 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7992 "connectors_changed:%d\n",
7994 new_crtc_state->enable,
7995 new_crtc_state->active,
7996 new_crtc_state->planes_changed,
7997 new_crtc_state->mode_changed,
7998 new_crtc_state->active_changed,
7999 new_crtc_state->connectors_changed);
8001 /* Remove stream for any changed/disabled CRTC */
8004 if (!dm_old_crtc_state->stream)
8007 ret = dm_atomic_get_state(state, &dm_state);
8011 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8014 /* i.e. reset mode */
8015 if (dc_remove_stream_from_ctx(
8018 dm_old_crtc_state->stream) != DC_OK) {
8023 dc_stream_release(dm_old_crtc_state->stream);
8024 dm_new_crtc_state->stream = NULL;
8026 reset_freesync_config_for_crtc(dm_new_crtc_state);
8028 *lock_and_validation_needed = true;
8030 } else {/* Add stream for any updated/enabled CRTC */
8032 * Quick fix to prevent a NULL pointer dereference on new_stream when
8033 * added MST connectors are not found in the existing crtc_state (chained mode).
8034 * TODO: need to dig out the root cause of that.
8036 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8039 if (modereset_required(new_crtc_state))
8042 if (modeset_required(new_crtc_state, new_stream,
8043 dm_old_crtc_state->stream)) {
8045 WARN_ON(dm_new_crtc_state->stream);
8047 ret = dm_atomic_get_state(state, &dm_state);
8051 dm_new_crtc_state->stream = new_stream;
8053 dc_stream_retain(new_stream);
8055 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8058 if (dc_add_stream_to_ctx(
8061 dm_new_crtc_state->stream) != DC_OK) {
8066 *lock_and_validation_needed = true;
8071 /* Release extra reference */
8073 dc_stream_release(new_stream);
8076 * We want to do dc stream updates that do not require a
8077 * full modeset below.
8079 if (!(enable && aconnector && new_crtc_state->enable &&
8080 new_crtc_state->active))
8083 * Given above conditions, the dc state cannot be NULL because:
8084 * 1. We're in the process of enabling CRTCs (the stream has just been added
8085 * to the dc context, or is already on the context),
8086 * 2. Has a valid connector attached, and
8087 * 3. Is currently active and enabled.
8088 * => The dc stream state currently exists.
8090 BUG_ON(dm_new_crtc_state->stream == NULL);
8092 /* Scaling or underscan settings */
8093 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8094 update_stream_scaling_settings(
8095 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8098 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8101 * Color management settings. We also update color properties
8102 * when a modeset is needed, to ensure it gets reprogrammed.
8104 if (dm_new_crtc_state->base.color_mgmt_changed ||
8105 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8106 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8111 /* Update Freesync settings. */
8112 get_freesync_config_for_crtc(dm_new_crtc_state,
8119 dc_stream_release(new_stream);
8123 static bool should_reset_plane(struct drm_atomic_state *state,
8124 struct drm_plane *plane,
8125 struct drm_plane_state *old_plane_state,
8126 struct drm_plane_state *new_plane_state)
8128 struct drm_plane *other;
8129 struct drm_plane_state *old_other_state, *new_other_state;
8130 struct drm_crtc_state *new_crtc_state;
8134 * TODO: Remove this hack once the checks below are sufficient
8135 * to determine when we need to reset all the planes on
8138 if (state->allow_modeset)
8141 /* Exit early if we know that we're adding or removing the plane. */
8142 if (old_plane_state->crtc != new_plane_state->crtc)
8145 /* old crtc == new_crtc == NULL, plane not in context. */
8146 if (!new_plane_state->crtc)
8150 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8152 if (!new_crtc_state)
8155 /* CRTC Degamma changes currently require us to recreate planes. */
8156 if (new_crtc_state->color_mgmt_changed)
8159 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8163 * If there are any new primary or overlay planes being added or
8164 * removed then the z-order can potentially change. To ensure
8165 * correct z-order and pipe acquisition the current DC architecture
8166 * requires us to remove and recreate all existing planes.
8168 * TODO: Come up with a more elegant solution for this.
8170 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8171 if (other->type == DRM_PLANE_TYPE_CURSOR)
8174 if (old_other_state->crtc != new_plane_state->crtc &&
8175 new_other_state->crtc != new_plane_state->crtc)
8178 if (old_other_state->crtc != new_other_state->crtc)
8181 /* TODO: Remove this once we can handle fast format changes. */
8182 if (old_other_state->fb && new_other_state->fb &&
8183 old_other_state->fb->format != new_other_state->fb->format)
8190 static int dm_update_plane_state(struct dc *dc,
8191 struct drm_atomic_state *state,
8192 struct drm_plane *plane,
8193 struct drm_plane_state *old_plane_state,
8194 struct drm_plane_state *new_plane_state,
8196 bool *lock_and_validation_needed)
8199 struct dm_atomic_state *dm_state = NULL;
8200 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8201 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8202 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8203 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8204 struct amdgpu_crtc *new_acrtc;
8209 new_plane_crtc = new_plane_state->crtc;
8210 old_plane_crtc = old_plane_state->crtc;
8211 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8212 dm_old_plane_state = to_dm_plane_state(old_plane_state);
8214 /*TODO Implement better atomic check for cursor plane */
8215 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8216 if (!enable || !new_plane_crtc ||
8217 drm_atomic_plane_disabling(plane->state, new_plane_state))
8220 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8222 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8223 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8224 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8225 new_plane_state->crtc_w, new_plane_state->crtc_h);
8232 needs_reset = should_reset_plane(state, plane, old_plane_state,
8235 /* Remove any changed/removed planes */
8240 if (!old_plane_crtc)
8243 old_crtc_state = drm_atomic_get_old_crtc_state(
8244 state, old_plane_crtc);
8245 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8247 if (!dm_old_crtc_state->stream)
8250 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8251 plane->base.id, old_plane_crtc->base.id);
8253 ret = dm_atomic_get_state(state, &dm_state);
8257 if (!dc_remove_plane_from_context(
8259 dm_old_crtc_state->stream,
8260 dm_old_plane_state->dc_state,
8261 dm_state->context)) {
8268 dc_plane_state_release(dm_old_plane_state->dc_state);
8269 dm_new_plane_state->dc_state = NULL;
8271 *lock_and_validation_needed = true;
8273 } else { /* Add new planes */
8274 struct dc_plane_state *dc_new_plane_state;
8276 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8279 if (!new_plane_crtc)
8282 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8283 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8285 if (!dm_new_crtc_state->stream)
8291 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8295 WARN_ON(dm_new_plane_state->dc_state);
8297 dc_new_plane_state = dc_create_plane_state(dc);
8298 if (!dc_new_plane_state)
8301 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8302 plane->base.id, new_plane_crtc->base.id);
8304 ret = fill_dc_plane_attributes(
8305 new_plane_crtc->dev->dev_private,
8310 dc_plane_state_release(dc_new_plane_state);
8314 ret = dm_atomic_get_state(state, &dm_state);
8316 dc_plane_state_release(dc_new_plane_state);
8321 * Any atomic check errors that occur after this will
8322 * not need a release. The plane state will be attached
8323 * to the stream, and therefore part of the atomic
8324 * state. It'll be released when the atomic state is
8327 if (!dc_add_plane_to_context(
8329 dm_new_crtc_state->stream,
8331 dm_state->context)) {
8333 dc_plane_state_release(dc_new_plane_state);
8337 dm_new_plane_state->dc_state = dc_new_plane_state;
8339 /* Tell DC to do a full surface update every time there
8340 * is a plane change. Inefficient, but works for now.
8342 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8344 *lock_and_validation_needed = true;
8352 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8353 struct drm_atomic_state *state,
8354 enum surface_update_type *out_type)
8356 struct dc *dc = dm->dc;
8357 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8358 int i, j, num_plane, ret = 0;
8359 struct drm_plane_state *old_plane_state, *new_plane_state;
8360 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8361 struct drm_crtc *new_plane_crtc;
8362 struct drm_plane *plane;
8364 struct drm_crtc *crtc;
8365 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8366 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8367 struct dc_stream_status *status = NULL;
8368 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8369 struct surface_info_bundle {
8370 struct dc_surface_update surface_updates[MAX_SURFACES];
8371 struct dc_plane_info plane_infos[MAX_SURFACES];
8372 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8373 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8374 struct dc_stream_update stream_update;
8377 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8380 DRM_ERROR("Failed to allocate update bundle\n");
8381 /* Set type to FULL to avoid crashing in DC*/
8382 update_type = UPDATE_TYPE_FULL;
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

		memset(bundle, 0, sizeof(struct surface_info_bundle));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
			uint64_t tiling_flags;
			bool tmz_surface = false;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			bundle->surface_updates[num_plane].surface =
				new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
					new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
					new_dm_plane_state->dc_state->in_transfer_func;
				bundle->surface_updates[num_plane].gamut_remap_matrix =
					&new_dm_plane_state->dc_state->gamut_remap_matrix;
				bundle->stream_update.gamut_remap =
					&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
					&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
					new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address, tmz_surface,
					false);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
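/*
 * A modeset on a CRTC that drives a DSC-capable MST stream can change the
 * bandwidth available to the other streams in the same MST topology, so the
 * CRTCs sharing that topology are pulled into the atomic state as well.
 */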
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state; otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: Negative error code if validation failed, 0 otherwise.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
	enum dc_status status;
	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset or plane update
	 * which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}
	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;
	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into context without causing a full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}
	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to set
	 * the global lock. Leaving it in to check if we broke any corner cases:
	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */
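		/*
		 * DM registers a single atomic private object, so at most one
		 * entry below can match and it is safe to stop at the first
		 * hit.
		 */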
		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}
	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
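/*
 * Probe the DPCD DP_DOWN_STREAM_PORT_COUNT register for the
 * MSA_TIMING_PAR_IGNORED bit, which tells us whether the sink can drive
 * timings without relying on the MSA timing parameters - a prerequisite
 * for variable refresh (FreeSync) over DP.
 */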
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data)))
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;

	return capable;
}
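/*
 * amdgpu_dm_update_freesync_caps - update the connector's FreeSync (VRR) state
 * @connector: DRM connector to update
 * @edid: EDID of the attached sink, or NULL on disconnect
 *
 * Parse the EDID monitor range descriptor for the supported refresh-rate
 * range and set the connector's vrr_capable property accordingly.
 */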
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		return;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * EDID is non-NULL at this point; restrict FreeSync to DP and eDP
	 * sinks.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
	    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}

	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;
			range	= &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}
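		/*
		 * Only advertise FreeSync when the EDID reports a usable
		 * refresh-rate window, i.e. one wider than 10 Hz.
		 */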
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
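/*
 * amdgpu_dm_set_psr_caps() - cache the sink's PSR capabilities
 * @link: link to the eDP sink
 *
 * Read DP_PSR_SUPPORT from the DPCD and record the reported PSR version.
 * Only connected eDP links are considered.
 */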
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
/*
 * amdgpu_dm_link_setup_psr() - configure PSR on the stream's link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}

	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Number of static frames before generating an interrupt to enter
	 * PSR. Initialized to a fail-safe of 2 frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);
	/*
	 * Round up. Calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
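	/*
	 * Worked example, for illustration: a 60 Hz mode has a frame time of
	 * ~16666 us, so 30000 / 16666 = 1 and num_frames_static becomes 2; a
	 * 144 Hz mode (~6944 us per frame) yields 30000 / 6944 + 1 = 5 frames.
	 */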
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}
/*
 * amdgpu_dm_psr_disable() - disable PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);