/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

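/*
 * Note (added for clarity): the signed DMCUB firmware image wraps its
 * inst_const section with a PSP header and footer of these sizes; both
 * are stripped out when the region is sized and copied, see
 * dm_dmub_hw_init() and dm_dmub_sw_init() below.
 */
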
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED,
                                 amdgpu_crtc->crtc_id,
                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        if (!e)
                WARN_ON(1);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                                adev->dm.freesync_module,
                                                acrtc->dm_irq_params.stream,
                                                &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                                adev->dm.dc,
                                                acrtc->dm_irq_params.stream,
                                                &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
                                                           struct drm_atomic_state *state)
{
        struct drm_connector *connector;
        struct drm_crtc *crtc;
        struct amdgpu_dm_connector *amdgpu_dm_connector;
        struct drm_connector_state *conn_state;
        struct dm_crtc_state *acrtc_state;
        struct drm_crtc_state *crtc_state;
        struct dc_stream_state *stream;
        struct drm_device *dev = adev_to_drm(adev);

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

                amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
                conn_state = connector->state;

                if (!(conn_state && conn_state->crtc))
                        continue;

                crtc = conn_state->crtc;
                acrtc_state = to_dm_crtc_state(crtc->state);

                if (!(acrtc_state && acrtc_state->stream))
                        continue;

                stream = acrtc_state->stream;

                if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
                    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
                    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
                    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
                        conn_state = drm_atomic_get_connector_state(state, connector);
                        crtc_state = drm_atomic_get_crtc_state(state, crtc);
                        crtc_state->mode_changed = true;
                }
        }
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        init_data.soc_bounding_box = adev->dm.soc_bounding_box;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        if (adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);

        return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
#endif
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        case CHIP_SIENNA_CICHLID:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
                break;
        case CHIP_NAVY_FLOUNDER:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
                break;
#endif

        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }

        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;

        if (!dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        /* Create the DMUB service. */
        status = dmub_srv_create(dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->bios_size;
        region_params.fw_bss_data = region_params.bss_data_size ?
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes) : NULL;
        region_params.fw_inst_const =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
        fb_info = adev->dm.dmub_fb_info;

        if (!fb_info) {
                DRM_ERROR(
                        "Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        return 0;
}

static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = dm_dmub_sw_init(adev);
        if (r)
                return r;

        return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        kfree(adev->dm.dmub_fb_info);
        adev->dm.dmub_fb_info = NULL;

        if (adev->dm.dmub_srv) {
                dmub_srv_destroy(adev->dm.dmub_srv);
                adev->dm.dmub_srv = NULL;
        }

        release_firmware(adev->dm.dmub_fw);
        adev->dm.dmub_fw = NULL;

        release_firmware(adev->dm.fw_dmcu);
        adev->dm.fw_dmcu = NULL;

        return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        int ret = 0;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                    aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                         aconnector,
                                         aconnector->base.base.id);

                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        return ret;
}

static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = NULL;
        bool ret = true;

        dmcu = adev->dm.dc->res_pool->dmcu;

        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        /* Min backlight level after ABM reduction,  Don't allow below 1%
         * 0xFFFF x 0.01 = 0x28F
         */
        params.min_abm_backlight = 0x28F;

        /* In the case where abm is implemented on dmcub,
         * dmcu object will be null.
         * ABM 2.4 and up are implemented on dmcub.
         */
        if (dmcu)
                ret = dmcu_load_iram(dmcu, params);
        else if (adev->dm.dc->ctx->dmub_srv)
                ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

        if (!ret)
                return -EINVAL;

        return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_dp_mst_topology_mgr *mgr;
        int ret;
        bool need_hotplug = false;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_port)
                        continue;

                mgr = &aconnector->mst_mgr;

                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
                        ret = drm_dp_mst_topology_mgr_resume(mgr, true);
                        if (ret < 0) {
                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                                need_hotplug = true;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        if (need_hotplug)
                drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return 0;

        /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
         * depends on the Windows driver dc implementation.
         * For Navi1x, clock settings of dcn watermarks are fixed. The settings
         * should be passed to smu during boot up and resume from s3.
         * boot up: dc calculates dcn watermark clock settings within dc_create,
         * dcn20_resource_construct
         * then calls pplib functions below to pass the settings to smu:
         * smu_set_watermarks_for_clock_ranges
         * smu_set_watermarks_table
         * navi10_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Renoir, clock settings of dcn watermark are also fixed values.
         * dc has implemented a different flow for the Windows driver:
         * dc_hardware_init / dc_set_power_state
         * dcn10_init_hw
         * notify_wm_ranges
         * set_wm_ranges
         * -- Linux
         * smu_set_watermarks_for_clock_ranges
         * renoir_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Linux,
         * dc_hardware_init -> amdgpu_dm_init
         * dc_set_power_state --> dm_resume
         *
         * therefore, this function applies to Navi10/12/14 but not Renoir
         */
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                break;
        default:
                return 0;
        }

        ret = smu_write_watermarks_table(smu);
        if (ret) {
                DRM_ERROR("Failed to update WMTABLE!\n");
                return ret;
        }

        return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        /* Create DAL display manager */
        amdgpu_dm_init(adev);
        amdgpu_dm_hpd_init(adev);

        return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_dm_hpd_fini(adev);

        amdgpu_dm_irq_fini(adev);
        amdgpu_dm_fini(adev);
        return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
                                          struct dc_state *state, bool enable)
{
        enum dc_irq_source irq_source;
        struct amdgpu_crtc *acrtc;
        int rc = -EBUSY;
        int i = 0;

        for (i = 0; i < state->stream_count; i++) {
                acrtc = get_crtc_by_otg_inst(
                                adev, state->stream_status[i].primary_otg_inst);

                if (acrtc && state->stream_status[i].plane_count != 0) {
                        irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
                        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
                        DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
                                  acrtc->crtc_id, enable ? "en" : "dis", rc);
                        if (rc)
                                DRM_WARN("Failed to %s pflip interrupts\n",
                                         enable ? "enable" : "disable");

                        if (enable) {
                                rc = dm_enable_vblank(&acrtc->base);
                                if (rc)
                                        DRM_WARN("Failed to enable vblank interrupts\n");
                        } else {
                                dm_disable_vblank(&acrtc->base);
                        }
                }
        }
}

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
        struct dc_state *context = NULL;
        enum dc_status res = DC_ERROR_UNEXPECTED;
        int i;
        struct dc_stream_state *del_streams[MAX_PIPES];
        int del_streams_count = 0;

        memset(del_streams, 0, sizeof(del_streams));

        context = dc_create_state(dc);
        if (context == NULL)
                goto context_alloc_fail;

        dc_resource_state_copy_construct_current(dc, context);

        /* First remove from context all streams */
        for (i = 0; i < context->stream_count; i++) {
                struct dc_stream_state *stream = context->streams[i];

                del_streams[del_streams_count++] = stream;
        }

        /* Remove all planes for removed streams and then remove the streams */
        for (i = 0; i < del_streams_count; i++) {
                if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
                        res = DC_FAIL_DETACH_SURFACES;
                        goto fail;
                }

                res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
                if (res != DC_OK)
                        goto fail;
        }

        res = dc_validate_global_state(dc, context, false);

        if (res != DC_OK) {
                DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
                goto fail;
        }

        res = dc_commit_state(dc, context);

fail:
        dc_release_state(context);

context_alloc_fail:
        return res;
}

static int dm_suspend(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
        int ret = 0;

        if (amdgpu_in_reset(adev)) {
                mutex_lock(&dm->dc_lock);
                dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

                dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

                amdgpu_dm_commit_zero_streams(dm->dc);

                amdgpu_dm_irq_suspend(adev);

                return ret;
        }

        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

        s3_handle_mst(adev_to_drm(adev), true);

        amdgpu_dm_irq_suspend(adev);

        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

        return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
                                             struct drm_crtc *crtc)
{
        uint32_t i;
        struct drm_connector_state *new_con_state;
        struct drm_connector *connector;
        struct drm_crtc *crtc_from_state;

        for_each_new_connector_in_state(state, connector, new_con_state, i) {
                crtc_from_state = new_con_state->crtc;

                if (crtc_from_state == crtc)
                        return to_amdgpu_dm_connector(connector);
        }

        return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
        struct dc_sink_init_data sink_init_data = { 0 };
        struct display_sink_capability sink_caps = { 0 };
        enum dc_edid_status edid_status;
        struct dc_context *dc_ctx = link->ctx;
        struct dc_sink *sink = NULL;
        struct dc_sink *prev_sink = NULL;

        link->type = dc_connection_none;
        prev_sink = link->local_sink;

        if (prev_sink != NULL)
                dc_sink_retain(prev_sink);

        switch (link->connector_signal) {
        case SIGNAL_TYPE_HDMI_TYPE_A: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
                break;
        }

        case SIGNAL_TYPE_DVI_SINGLE_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
                break;
        }

        case SIGNAL_TYPE_DVI_DUAL_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
                break;
        }

        case SIGNAL_TYPE_LVDS: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_LVDS;
                break;
        }

        case SIGNAL_TYPE_EDP: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                sink_caps.signal = SIGNAL_TYPE_EDP;
                break;
        }

        case SIGNAL_TYPE_DISPLAY_PORT: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
                break;
        }

        default:
                DC_ERROR("Invalid connector type! signal:%d\n",
                         link->connector_signal);
                return;
        }

        sink_init_data.link = link;
        sink_init_data.sink_signal = sink_caps.signal;

        sink = dc_sink_create(&sink_init_data);
        if (!sink) {
                DC_ERROR("Failed to create sink!\n");
                return;
        }

        /* dc_sink_create returns a new reference */
        link->local_sink = sink;

        edid_status = dm_helpers_read_local_edid(
                        link->ctx,
                        link,
                        sink);

        if (edid_status != EDID_OK)
                DC_ERROR("Failed to read EDID");

}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
                                     struct amdgpu_display_manager *dm)
{
        struct {
                struct dc_surface_update surface_updates[MAX_SURFACES];
                struct dc_plane_info plane_infos[MAX_SURFACES];
                struct dc_scaling_info scaling_infos[MAX_SURFACES];
                struct dc_flip_addrs flip_addrs[MAX_SURFACES];
                struct dc_stream_update stream_update;
        } *bundle;
        int k, m;

        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

        if (!bundle) {
                dm_error("Failed to allocate update bundle\n");
                goto cleanup;
        }

        for (k = 0; k < dc_state->stream_count; k++) {
                bundle->stream_update.stream = dc_state->streams[k];

                for (m = 0; m < dc_state->stream_status->plane_count; m++) {
                        bundle->surface_updates[m].surface =
                                dc_state->stream_status->plane_states[m];
                        bundle->surface_updates[m].surface->force_full_update =
                                true;
                }
                dc_commit_updates_for_stream(
                        dm->dc, bundle->surface_updates,
                        dc_state->stream_status->plane_count,
                        dc_state->streams[k], &bundle->stream_update, dc_state);
        }

cleanup:
        kfree(bundle);

        return;
}

static int dm_resume(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct drm_device *ddev = adev_to_drm(adev);
        struct amdgpu_display_manager *dm = &adev->dm;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_crtc *crtc;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state;
        struct drm_plane *plane;
        struct drm_plane_state *new_plane_state;
        struct dm_plane_state *dm_new_plane_state;
        struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct dc_state *dc_state;
        int i, r, j;

        if (amdgpu_in_reset(adev)) {
                dc_state = dm->cached_dc_state;

                r = dm_dmub_hw_init(adev);
                if (r)
                        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

                dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
                dc_resume(dm->dc);

                amdgpu_dm_irq_resume_early(adev);

                for (i = 0; i < dc_state->stream_count; i++) {
                        dc_state->streams[i]->mode_changed = true;
                        for (j = 0; j < dc_state->stream_status->plane_count; j++) {
                                dc_state->stream_status->plane_states[j]->update_flags.raw
                                        = 0xffffffff;
                        }
                }

                WARN_ON(!dc_commit_state(dm->dc, dc_state));

                dm_gpureset_commit_state(dm->cached_dc_state, dm);

                dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

                dc_release_state(dm->cached_dc_state);
                dm->cached_dc_state = NULL;

                amdgpu_dm_irq_resume_late(adev);

                mutex_unlock(&dm->dc_lock);

                return 0;
        }
        /* Recreate dc_state - DC invalidates it when setting power state to S3. */
        dc_release_state(dm_state->context);
        dm_state->context = dc_create_state(dm->dc);
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);

        /* Before powering on DC we need to re-initialize DMUB. */
        r = dm_dmub_hw_init(adev);
        if (r)
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

        /* program HPD filter */
        dc_resume(dm->dc);

        /*
         * early enable HPD Rx IRQ, should be done before set mode as short
         * pulse interrupts are used for MST
         */
        amdgpu_dm_irq_resume_early(adev);

        /* On resume we need to rewrite the MSTM control bits to enable MST */
        s3_handle_mst(ddev, false);

        /* Do detection */
        drm_connector_list_iter_begin(ddev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);

                /*
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
                if (aconnector->mst_port)
                        continue;

                mutex_lock(&aconnector->hpd_lock);
                if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                if (aconnector->base.force && new_connection_type == dc_connection_none)
                        emulated_link_detect(aconnector->dc_link);
                else
                        dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;

                if (aconnector->dc_sink)
                        dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
        }
        drm_connector_list_iter_end(&iter);

        /* Force mode set in atomic commit */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
                new_crtc_state->active_changed = true;

        /*
         * atomic_check is expected to create the dc states. We need to release
         * them here, since they were duplicated as part of the suspend
         * procedure.
         */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                if (dm_new_crtc_state->stream) {
                        WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
                        dc_stream_release(dm_new_crtc_state->stream);
                        dm_new_crtc_state->stream = NULL;
                }
        }

        for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
                dm_new_plane_state = to_dm_plane_state(new_plane_state);
                if (dm_new_plane_state->dc_state) {
                        WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
                        dc_plane_state_release(dm_new_plane_state->dc_state);
                        dm_new_plane_state->dc_state = NULL;
                }
        }

        drm_atomic_helper_resume(ddev, dm->cached_state);

        dm->cached_state = NULL;

        amdgpu_dm_irq_resume_late(adev);

        amdgpu_dm_smu_write_watermarks_table(adev);

        return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
        .name = "dm",
        .early_init = dm_early_init,
        .late_init = dm_late_init,
        .sw_init = dm_sw_init,
        .sw_fini = dm_sw_fini,
        .hw_init = dm_hw_init,
        .hw_fini = dm_hw_fini,
        .suspend = dm_suspend,
        .resume = dm_resume,
        .is_idle = dm_is_idle,
        .wait_for_idle = dm_wait_for_idle,
        .check_soft_reset = dm_check_soft_reset,
        .soft_reset = dm_soft_reset,
        .set_clockgating_state = dm_set_clockgating_state,
        .set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &amdgpu_dm_funcs,
};

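/*
 * Note (added for clarity): the base driver picks this block up per ASIC;
 * e.g. the SOC-specific setup code calls
 * amdgpu_device_ip_block_add(adev, &dm_ip_block) when CONFIG_DRM_AMD_DC
 * is enabled, as described in the DM Lifecycle comment above.
 */
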
/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
        .fb_create = amdgpu_display_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = amdgpu_dm_atomic_check,
        .atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
        .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

2113 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2115 u32 max_cll, min_cll, max, min, q, r;
2116 struct amdgpu_dm_backlight_caps *caps;
2117 struct amdgpu_display_manager *dm;
2118 struct drm_connector *conn_base;
2119 struct amdgpu_device *adev;
2120 struct dc_link *link = NULL;
2121 static const u8 pre_computed_values[] = {
2122 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2123 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2125 if (!aconnector || !aconnector->dc_link)
2128 link = aconnector->dc_link;
2129 if (link->connector_signal != SIGNAL_TYPE_EDP)
2132 conn_base = &aconnector->base;
2133 adev = drm_to_adev(conn_base->dev);
2135 caps = &dm->backlight_caps;
2136 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2137 caps->aux_support = false;
2138 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2139 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2141 if (caps->ext_caps->bits.oled == 1 ||
2142 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2143 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2144 caps->aux_support = true;
2146 /* From the specification (CTA-861-G), for calculating the maximum
2147 * luminance we need to use:
2148 * Luminance = 50*2**(CV/32)
2149 * Where CV is a one-byte value.
2150 * Evaluating this expression would require floating-point precision;
2151 * to avoid that complexity, we take advantage of the fact that CV is
2152 * divided by a constant. From Euclid's division algorithm, we know that
2153 * CV can be written as CV = 32*q + r. Substituting CV in the luminance
2154 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2155 * pre-compute the 32 values of 50*2**(r/32). For that, we used the
2156 * following Ruby line:
2157 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2158 * The results of the above expression can be verified against
2159 * pre_computed_values.
2163 max = (1 << q) * pre_computed_values[r];
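/*
 * Worked example (a sketch, assuming the sink reports max_cll = 100):
 * q = 100 / 32 = 3 and r = 100 % 32 = 4, so
 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440 nits.
 * The exact value round(50 * 2**(100/32.0)) is 436; the small gap comes
 * from rounding 50*2**(4/32) up to 55 before the shift.
 */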
2165 // min luminance: maxLum * (CV/255)^2 / 100. Keep the squaring in the
2166 // numerator so integer division doesn't truncate CV/255 to zero first.
2167 min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 100 * 255 * 255);
2169 caps->aux_max_input_signal = max;
2170 caps->aux_min_input_signal = min;
2173 void amdgpu_dm_update_connector_after_detect(
2174 struct amdgpu_dm_connector *aconnector)
2176 struct drm_connector *connector = &aconnector->base;
2177 struct drm_device *dev = connector->dev;
2178 struct dc_sink *sink;
2180 /* MST handled by drm_mst framework */
2181 if (aconnector->mst_mgr.mst_state)
2184 sink = aconnector->dc_link->local_sink;
2186 dc_sink_retain(sink);
2189 * An EDID-managed connector gets its first update only in the mode_valid hook;
2190 * the connector sink is then set to either a fake or a physical sink, depending
2191 * on the link status. Skip if this was already done during boot.
2193 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2194 && aconnector->dc_em_sink) {
2197 * For S3 resume with a headless configuration, use the emulated sink
2198 * (dc_em_sink) to fake a stream, because connector->sink is NULL on resume.
2200 mutex_lock(&dev->mode_config.mutex);
2203 if (aconnector->dc_sink) {
2204 amdgpu_dm_update_freesync_caps(connector, NULL);
2206 * The retain and release below bump up the sink's refcount
2207 * because the link no longer points to it after disconnect;
2208 * otherwise, the next crtc-to-connector reshuffle by the UMD
2209 * would trigger an unwanted dc_sink release.
2211 dc_sink_release(aconnector->dc_sink);
2213 aconnector->dc_sink = sink;
2214 dc_sink_retain(aconnector->dc_sink);
2215 amdgpu_dm_update_freesync_caps(connector,
2218 amdgpu_dm_update_freesync_caps(connector, NULL);
2219 if (!aconnector->dc_sink) {
2220 aconnector->dc_sink = aconnector->dc_em_sink;
2221 dc_sink_retain(aconnector->dc_sink);
2225 mutex_unlock(&dev->mode_config.mutex);
2228 dc_sink_release(sink);
2233 * TODO: temporary guard while we look for a proper fix.
2234 * If this sink is an MST sink, we should not do anything.
2236 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2237 dc_sink_release(sink);
2241 if (aconnector->dc_sink == sink) {
2243 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2246 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2247 aconnector->connector_id);
2249 dc_sink_release(sink);
2253 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2254 aconnector->connector_id, aconnector->dc_sink, sink);
2256 mutex_lock(&dev->mode_config.mutex);
2259 * 1. Update status of the drm connector
2260 * 2. Send an event and let userspace tell us what to do
2264 * TODO: check if we still need the S3 mode update workaround.
2265 * If yes, put it here.
2267 if (aconnector->dc_sink)
2268 amdgpu_dm_update_freesync_caps(connector, NULL);
2270 aconnector->dc_sink = sink;
2271 dc_sink_retain(aconnector->dc_sink);
2272 if (sink->dc_edid.length == 0) {
2273 aconnector->edid = NULL;
2274 if (aconnector->dc_link->aux_mode) {
2275 drm_dp_cec_unset_edid(
2276 &aconnector->dm_dp_aux.aux);
2280 (struct edid *)sink->dc_edid.raw_edid;
2282 drm_connector_update_edid_property(connector,
2284 drm_add_edid_modes(connector, aconnector->edid);
2286 if (aconnector->dc_link->aux_mode)
2287 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2291 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2292 update_connector_ext_caps(aconnector);
2294 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2295 amdgpu_dm_update_freesync_caps(connector, NULL);
2296 drm_connector_update_edid_property(connector, NULL);
2297 aconnector->num_modes = 0;
2298 dc_sink_release(aconnector->dc_sink);
2299 aconnector->dc_sink = NULL;
2300 aconnector->edid = NULL;
2301 #ifdef CONFIG_DRM_AMD_DC_HDCP
2302 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2303 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2304 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2308 mutex_unlock(&dev->mode_config.mutex);
2310 update_subconnector_property(aconnector);
2313 dc_sink_release(sink);
2316 static void handle_hpd_irq(void *param)
2318 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2319 struct drm_connector *connector = &aconnector->base;
2320 struct drm_device *dev = connector->dev;
2321 enum dc_connection_type new_connection_type = dc_connection_none;
2322 #ifdef CONFIG_DRM_AMD_DC_HDCP
2323 struct amdgpu_device *adev = drm_to_adev(dev);
2327 * In case of failure or MST, there is no need to update the connector status
2328 * or notify the OS, since (in the MST case) MST handles this in its own context.
2330 mutex_lock(&aconnector->hpd_lock);
2332 #ifdef CONFIG_DRM_AMD_DC_HDCP
2333 if (adev->dm.hdcp_workqueue)
2334 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2336 if (aconnector->fake_enable)
2337 aconnector->fake_enable = false;
2339 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2340 DRM_ERROR("KMS: Failed to detect connector\n");
2342 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2343 emulated_link_detect(aconnector->dc_link);
2346 drm_modeset_lock_all(dev);
2347 dm_restore_drm_connector_state(dev, connector);
2348 drm_modeset_unlock_all(dev);
2350 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2351 drm_kms_helper_hotplug_event(dev);
2353 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2354 amdgpu_dm_update_connector_after_detect(aconnector);
2357 drm_modeset_lock_all(dev);
2358 dm_restore_drm_connector_state(dev, connector);
2359 drm_modeset_unlock_all(dev);
2361 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2362 drm_kms_helper_hotplug_event(dev);
2364 mutex_unlock(&aconnector->hpd_lock);
2368 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2370 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2372 bool new_irq_handled = false;
2374 int dpcd_bytes_to_read;
2376 const int max_process_count = 30;
2377 int process_count = 0;
2379 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2381 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2382 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2383 /* DPCD 0x200 - 0x201 for downstream IRQ */
2384 dpcd_addr = DP_SINK_COUNT;
2386 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2387 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2388 dpcd_addr = DP_SINK_COUNT_ESI;
2391 dret = drm_dp_dpcd_read(
2392 &aconnector->dm_dp_aux.aux,
2395 dpcd_bytes_to_read);
2397 while (dret == dpcd_bytes_to_read &&
2398 process_count < max_process_count) {
2404 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2405 /* handle HPD short pulse irq */
2406 if (aconnector->mst_mgr.mst_state)
2408 &aconnector->mst_mgr,
2412 if (new_irq_handled) {
2413 /* ACK at DPCD to notify downstream */
2414 const int ack_dpcd_bytes_to_write =
2415 dpcd_bytes_to_read - 1;
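/*
 * A sketch of the ACK layout: esi[0] holds the read-only sink count,
 * so only the event bytes starting at dpcd_addr + 1 are written back,
 * hence dpcd_bytes_to_read - 1 bytes.
 */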
2417 for (retry = 0; retry < 3; retry++) {
2420 wret = drm_dp_dpcd_write(
2421 &aconnector->dm_dp_aux.aux,
2424 ack_dpcd_bytes_to_write);
2425 if (wret == ack_dpcd_bytes_to_write)
2429 /* check if there is new irq to be handled */
2430 dret = drm_dp_dpcd_read(
2431 &aconnector->dm_dp_aux.aux,
2434 dpcd_bytes_to_read);
2436 new_irq_handled = false;
2442 if (process_count == max_process_count)
2443 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2446 static void handle_hpd_rx_irq(void *param)
2448 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2449 struct drm_connector *connector = &aconnector->base;
2450 struct drm_device *dev = connector->dev;
2451 struct dc_link *dc_link = aconnector->dc_link;
2452 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2453 enum dc_connection_type new_connection_type = dc_connection_none;
2454 #ifdef CONFIG_DRM_AMD_DC_HDCP
2455 union hpd_irq_data hpd_irq_data;
2456 struct amdgpu_device *adev = drm_to_adev(dev);
2458 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2462 * TODO: Temporarily add a mutex to prevent the HPD interrupt from causing
2463 * a GPIO conflict; once an i2c helper is implemented, retire this mutex.
2466 if (dc_link->type != dc_connection_mst_branch)
2467 mutex_lock(&aconnector->hpd_lock);
2470 #ifdef CONFIG_DRM_AMD_DC_HDCP
2471 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2473 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2475 !is_mst_root_connector) {
2476 /* Downstream Port status changed. */
2477 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2478 DRM_ERROR("KMS: Failed to detect connector\n");
2480 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2481 emulated_link_detect(dc_link);
2483 if (aconnector->fake_enable)
2484 aconnector->fake_enable = false;
2486 amdgpu_dm_update_connector_after_detect(aconnector);
2489 drm_modeset_lock_all(dev);
2490 dm_restore_drm_connector_state(dev, connector);
2491 drm_modeset_unlock_all(dev);
2493 drm_kms_helper_hotplug_event(dev);
2494 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2496 if (aconnector->fake_enable)
2497 aconnector->fake_enable = false;
2499 amdgpu_dm_update_connector_after_detect(aconnector);
2502 drm_modeset_lock_all(dev);
2503 dm_restore_drm_connector_state(dev, connector);
2504 drm_modeset_unlock_all(dev);
2506 drm_kms_helper_hotplug_event(dev);
2509 #ifdef CONFIG_DRM_AMD_DC_HDCP
2510 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2511 if (adev->dm.hdcp_workqueue)
2512 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2515 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2516 (dc_link->type == dc_connection_mst_branch))
2517 dm_handle_hpd_rx_irq(aconnector);
2519 if (dc_link->type != dc_connection_mst_branch) {
2520 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2521 mutex_unlock(&aconnector->hpd_lock);
2525 static void register_hpd_handlers(struct amdgpu_device *adev)
2527 struct drm_device *dev = adev_to_drm(adev);
2528 struct drm_connector *connector;
2529 struct amdgpu_dm_connector *aconnector;
2530 const struct dc_link *dc_link;
2531 struct dc_interrupt_params int_params = {0};
2533 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2534 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2536 list_for_each_entry(connector,
2537 &dev->mode_config.connector_list, head) {
2539 aconnector = to_amdgpu_dm_connector(connector);
2540 dc_link = aconnector->dc_link;
2542 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2543 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2544 int_params.irq_source = dc_link->irq_source_hpd;
2546 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2548 (void *) aconnector);
2551 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2553 /* Also register for DP short pulse (hpd_rx). */
2554 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2555 int_params.irq_source = dc_link->irq_source_hpd_rx;
2557 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2559 (void *) aconnector);
2564 #if defined(CONFIG_DRM_AMD_DC_SI)
2565 /* Register IRQ sources and initialize IRQ callbacks */
2566 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2568 struct dc *dc = adev->dm.dc;
2569 struct common_irq_params *c_irq_params;
2570 struct dc_interrupt_params int_params = {0};
2573 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2575 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2576 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2579 * Actions of amdgpu_irq_add_id():
2580 * 1. Register a set() function with base driver.
2581 * Base driver will call set() function to enable/disable an
2582 * interrupt in DC hardware.
2583 * 2. Register amdgpu_dm_irq_handler().
2584 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2585 * coming from DC hardware.
2586 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2587 * for acknowledging and handling. */
2589 /* Use VBLANK interrupt */
2590 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2591 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2593 DRM_ERROR("Failed to add crtc irq id!\n");
2597 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2598 int_params.irq_source =
2599 dc_interrupt_to_irq_source(dc, i + 1, 0);
2601 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2603 c_irq_params->adev = adev;
2604 c_irq_params->irq_src = int_params.irq_source;
2606 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2607 dm_crtc_high_irq, c_irq_params);
2610 /* Use GRPH_PFLIP interrupt */
2611 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2612 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2613 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2615 DRM_ERROR("Failed to add page flip irq id!\n");
2619 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2620 int_params.irq_source =
2621 dc_interrupt_to_irq_source(dc, i, 0);
2623 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2625 c_irq_params->adev = adev;
2626 c_irq_params->irq_src = int_params.irq_source;
2628 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2629 dm_pflip_high_irq, c_irq_params);
2634 r = amdgpu_irq_add_id(adev, client_id,
2635 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2637 DRM_ERROR("Failed to add hpd irq id!\n");
2641 register_hpd_handlers(adev);
2647 /* Register IRQ sources and initialize IRQ callbacks */
2648 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2650 struct dc *dc = adev->dm.dc;
2651 struct common_irq_params *c_irq_params;
2652 struct dc_interrupt_params int_params = {0};
2655 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2657 if (adev->asic_type >= CHIP_VEGA10)
2658 client_id = SOC15_IH_CLIENTID_DCE;
2660 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2661 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2664 * Actions of amdgpu_irq_add_id():
2665 * 1. Register a set() function with base driver.
2666 * Base driver will call set() function to enable/disable an
2667 * interrupt in DC hardware.
2668 * 2. Register amdgpu_dm_irq_handler().
2669 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2670 * coming from DC hardware.
2671 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2672 * for acknowledging and handling. */
2674 /* Use VBLANK interrupt */
2675 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2676 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2678 DRM_ERROR("Failed to add crtc irq id!\n");
2682 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2683 int_params.irq_source =
2684 dc_interrupt_to_irq_source(dc, i, 0);
2686 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2688 c_irq_params->adev = adev;
2689 c_irq_params->irq_src = int_params.irq_source;
2691 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2692 dm_crtc_high_irq, c_irq_params);
2695 /* Use VUPDATE interrupt */
2696 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2697 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2699 DRM_ERROR("Failed to add vupdate irq id!\n");
2703 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2704 int_params.irq_source =
2705 dc_interrupt_to_irq_source(dc, i, 0);
2707 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2709 c_irq_params->adev = adev;
2710 c_irq_params->irq_src = int_params.irq_source;
2712 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2713 dm_vupdate_high_irq, c_irq_params);
2716 /* Use GRPH_PFLIP interrupt */
2717 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2718 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2719 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2721 DRM_ERROR("Failed to add page flip irq id!\n");
2725 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2726 int_params.irq_source =
2727 dc_interrupt_to_irq_source(dc, i, 0);
2729 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2731 c_irq_params->adev = adev;
2732 c_irq_params->irq_src = int_params.irq_source;
2734 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2735 dm_pflip_high_irq, c_irq_params);
2740 r = amdgpu_irq_add_id(adev, client_id,
2741 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2743 DRM_ERROR("Failed to add hpd irq id!\n");
2747 register_hpd_handlers(adev);
2752 #if defined(CONFIG_DRM_AMD_DC_DCN)
2753 /* Register IRQ sources and initialize IRQ callbacks */
2754 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2756 struct dc *dc = adev->dm.dc;
2757 struct common_irq_params *c_irq_params;
2758 struct dc_interrupt_params int_params = {0};
2762 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2763 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2766 * Actions of amdgpu_irq_add_id():
2767 * 1. Register a set() function with base driver.
2768 * Base driver will call set() function to enable/disable an
2769 * interrupt in DC hardware.
2770 * 2. Register amdgpu_dm_irq_handler().
2771 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2772 * coming from DC hardware.
2773 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2774 * for acknowledging and handling.
2777 /* Use VSTARTUP interrupt */
2778 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2779 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2781 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2784 DRM_ERROR("Failed to add crtc irq id!\n");
2788 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2789 int_params.irq_source =
2790 dc_interrupt_to_irq_source(dc, i, 0);
2792 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2794 c_irq_params->adev = adev;
2795 c_irq_params->irq_src = int_params.irq_source;
2797 amdgpu_dm_irq_register_interrupt(
2798 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2801 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2802 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2803 * to trigger at end of each vblank, regardless of state of the lock,
2804 * matching DCE behaviour.
2806 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2807 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2809 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2812 DRM_ERROR("Failed to add vupdate irq id!\n");
2816 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2817 int_params.irq_source =
2818 dc_interrupt_to_irq_source(dc, i, 0);
2820 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2822 c_irq_params->adev = adev;
2823 c_irq_params->irq_src = int_params.irq_source;
2825 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2826 dm_vupdate_high_irq, c_irq_params);
2829 /* Use GRPH_PFLIP interrupt */
2830 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2831 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2833 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2835 DRM_ERROR("Failed to add page flip irq id!\n");
2839 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2840 int_params.irq_source =
2841 dc_interrupt_to_irq_source(dc, i, 0);
2843 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2845 c_irq_params->adev = adev;
2846 c_irq_params->irq_src = int_params.irq_source;
2848 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2849 dm_pflip_high_irq, c_irq_params);
2854 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2857 DRM_ERROR("Failed to add hpd irq id!\n");
2861 register_hpd_handlers(adev);
2868 * Acquires the lock for the atomic state object and returns
2869 * the new atomic state.
2871 * This should only be called during atomic check.
2873 static int dm_atomic_get_state(struct drm_atomic_state *state,
2874 struct dm_atomic_state **dm_state)
2876 struct drm_device *dev = state->dev;
2877 struct amdgpu_device *adev = drm_to_adev(dev);
2878 struct amdgpu_display_manager *dm = &adev->dm;
2879 struct drm_private_state *priv_state;
2884 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2885 if (IS_ERR(priv_state))
2886 return PTR_ERR(priv_state);
2888 *dm_state = to_dm_atomic_state(priv_state);
2893 static struct dm_atomic_state *
2894 dm_atomic_get_new_state(struct drm_atomic_state *state)
2896 struct drm_device *dev = state->dev;
2897 struct amdgpu_device *adev = drm_to_adev(dev);
2898 struct amdgpu_display_manager *dm = &adev->dm;
2899 struct drm_private_obj *obj;
2900 struct drm_private_state *new_obj_state;
2903 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2904 if (obj->funcs == dm->atomic_obj.funcs)
2905 return to_dm_atomic_state(new_obj_state);
2911 static struct drm_private_state *
2912 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2914 struct dm_atomic_state *old_state, *new_state;
2916 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2920 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2922 old_state = to_dm_atomic_state(obj->state);
2924 if (old_state && old_state->context)
2925 new_state->context = dc_copy_state(old_state->context);
2927 if (!new_state->context) {
2932 return &new_state->base;
2935 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2936 struct drm_private_state *state)
2938 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2940 if (dm_state && dm_state->context)
2941 dc_release_state(dm_state->context);
2946 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2947 .atomic_duplicate_state = dm_atomic_duplicate_state,
2948 .atomic_destroy_state = dm_atomic_destroy_state,
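/*
 * Typical usage of this private object (a sketch) from an atomic-check
 * path, once the object has been initialized below:
 *
 *	struct dm_atomic_state *dm_state;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	dm_state->context can then be modified under the acquired lock.
 */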
2951 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2953 struct dm_atomic_state *state;
2956 adev->mode_info.mode_config_initialized = true;
2958 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2959 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2961 adev_to_drm(adev)->mode_config.max_width = 16384;
2962 adev_to_drm(adev)->mode_config.max_height = 16384;
2964 adev_to_drm(adev)->mode_config.preferred_depth = 24;
2965 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2966 /* indicates support for immediate flip */
2967 adev_to_drm(adev)->mode_config.async_page_flip = true;
2969 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2971 state = kzalloc(sizeof(*state), GFP_KERNEL);
2975 state->context = dc_create_state(adev->dm.dc);
2976 if (!state->context) {
2981 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2983 drm_atomic_private_obj_init(adev_to_drm(adev),
2984 &adev->dm.atomic_obj,
2986 &dm_atomic_state_funcs);
2988 r = amdgpu_display_modeset_create_props(adev);
2990 dc_release_state(state->context);
2995 r = amdgpu_dm_audio_init(adev);
2997 dc_release_state(state->context);
3005 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3006 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3007 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3009 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3010 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3012 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3014 #if defined(CONFIG_ACPI)
3015 struct amdgpu_dm_backlight_caps caps;
3017 memset(&caps, 0, sizeof(caps));
3019 if (dm->backlight_caps.caps_valid)
3022 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3023 if (caps.caps_valid) {
3024 dm->backlight_caps.caps_valid = true;
3025 if (caps.aux_support)
3027 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3028 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3030 dm->backlight_caps.min_input_signal =
3031 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3032 dm->backlight_caps.max_input_signal =
3033 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3036 if (dm->backlight_caps.aux_support)
3039 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3040 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3044 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3051 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3052 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3057 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3058 unsigned *min, unsigned *max)
3063 if (caps->aux_support) {
3064 // Firmware limits are in nits, DC API wants millinits.
3065 *max = 1000 * caps->aux_max_input_signal;
3066 *min = 1000 * caps->aux_min_input_signal;
3068 // Firmware limits are 8-bit, PWM control is 16-bit.
3069 *max = 0x101 * caps->max_input_signal;
3070 *min = 0x101 * caps->min_input_signal;
3075 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3076 uint32_t brightness)
3080 if (!get_brightness_range(caps, &min, &max))
3083 // Rescale 0..255 to min..max
3084 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3085 AMDGPU_MAX_BL_LEVEL);
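/*
 * Worked example (a sketch, assuming PWM limits min_input_signal = 12
 * and max_input_signal = 255): min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, so a user brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */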
3088 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3089 uint32_t brightness)
3093 if (!get_brightness_range(caps, &min, &max))
3096 if (brightness < min)
3098 // Rescale min..max to 0..255
3099 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3103 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3105 struct amdgpu_display_manager *dm = bl_get_data(bd);
3106 struct amdgpu_dm_backlight_caps caps;
3107 struct dc_link *link = NULL;
3111 amdgpu_dm_update_backlight_caps(dm);
3112 caps = dm->backlight_caps;
3114 link = (struct dc_link *)dm->backlight_link;
3116 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3117 // Change brightness based on AUX property
3118 if (caps.aux_support)
3119 return set_backlight_via_aux(link, brightness);
3121 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3126 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3128 struct amdgpu_display_manager *dm = bl_get_data(bd);
3129 int ret = dc_link_get_backlight_level(dm->backlight_link);
3131 if (ret == DC_ERROR_UNEXPECTED)
3132 return bd->props.brightness;
3133 return convert_brightness_to_user(&dm->backlight_caps, ret);
3136 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3137 .options = BL_CORE_SUSPENDRESUME,
3138 .get_brightness = amdgpu_dm_backlight_get_brightness,
3139 .update_status = amdgpu_dm_backlight_update_status,
3143 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3146 struct backlight_properties props = { 0 };
3148 amdgpu_dm_update_backlight_caps(dm);
3150 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3151 props.brightness = AMDGPU_MAX_BL_LEVEL;
3152 props.type = BACKLIGHT_RAW;
3154 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3155 adev_to_drm(dm->adev)->primary->index);
3157 dm->backlight_dev = backlight_device_register(bl_name,
3158 adev_to_drm(dm->adev)->dev,
3160 &amdgpu_dm_backlight_ops,
3163 if (IS_ERR(dm->backlight_dev))
3164 DRM_ERROR("DM: Backlight registration failed!\n");
3166 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3171 static int initialize_plane(struct amdgpu_display_manager *dm,
3172 struct amdgpu_mode_info *mode_info, int plane_id,
3173 enum drm_plane_type plane_type,
3174 const struct dc_plane_cap *plane_cap)
3176 struct drm_plane *plane;
3177 unsigned long possible_crtcs;
3180 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3182 DRM_ERROR("KMS: Failed to allocate plane\n");
3185 plane->type = plane_type;
3188 * HACK: IGT tests expect that the primary plane for a CRTC
3189 * can only have one possible CRTC. Only expose support for
3190 * any CRTC on planes that won't be used as a primary plane
3191 * for a CRTC - i.e. overlay or underlay planes.
3193 possible_crtcs = 1 << plane_id;
3194 if (plane_id >= dm->dc->caps.max_streams)
3195 possible_crtcs = 0xff;
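/*
 * Example (a sketch, assuming a 4-stream ASIC): primary plane 2 gets
 * possible_crtcs = 1 << 2 = 0x4 (CRTC 2 only), while an overlay plane
 * with plane_id >= max_streams gets 0xff (any CRTC).
 */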
3197 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3200 DRM_ERROR("KMS: Failed to initialize plane\n");
3206 mode_info->planes[plane_id] = plane;
3212 static void register_backlight_device(struct amdgpu_display_manager *dm,
3213 struct dc_link *link)
3215 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3216 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3218 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3219 link->type != dc_connection_none) {
3221 * Even if registration fails, we should continue with
3222 * DM initialization, because not having backlight control
3223 * is better than a black screen.
3225 amdgpu_dm_register_backlight_device(dm);
3227 if (dm->backlight_dev)
3228 dm->backlight_link = link;
3235 * In this architecture, the association
3236 * connector -> encoder -> crtc
3237 * is not really required. The crtc and connector will hold the
3238 * display_index as an abstraction to use with the DAL component.
3240 * Returns 0 on success.
3242 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3244 struct amdgpu_display_manager *dm = &adev->dm;
3246 struct amdgpu_dm_connector *aconnector = NULL;
3247 struct amdgpu_encoder *aencoder = NULL;
3248 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3250 int32_t primary_planes;
3251 enum dc_connection_type new_connection_type = dc_connection_none;
3252 const struct dc_plane_cap *plane;
3254 link_cnt = dm->dc->caps.max_links;
3255 if (amdgpu_dm_mode_config_init(dm->adev)) {
3256 DRM_ERROR("DM: Failed to initialize mode config\n");
3260 /* There is one primary plane per CRTC */
3261 primary_planes = dm->dc->caps.max_streams;
3262 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3265 * Initialize primary planes, the implicit planes for legacy IOCTLs.
3266 * Order is reversed to match the iteration order in the atomic check.
3268 for (i = (primary_planes - 1); i >= 0; i--) {
3269 plane = &dm->dc->caps.planes[i];
3271 if (initialize_plane(dm, mode_info, i,
3272 DRM_PLANE_TYPE_PRIMARY, plane)) {
3273 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3279 * Initialize overlay planes, index starting after primary planes.
3280 * These planes have a higher DRM index than the primary planes since
3281 * they should be considered as having a higher z-order.
3282 * Order is reversed to match iteration order in atomic check.
3284 * Only support DCN for now, and only expose one so we don't encourage
3285 * userspace to use up all the pipes.
3287 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3288 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3290 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3293 if (!plane->blends_with_above || !plane->blends_with_below)
3296 if (!plane->pixel_format_support.argb8888)
3299 if (initialize_plane(dm, NULL, primary_planes + i,
3300 DRM_PLANE_TYPE_OVERLAY, plane)) {
3301 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3305 /* Only create one overlay plane. */
3309 for (i = 0; i < dm->dc->caps.max_streams; i++)
3310 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3311 DRM_ERROR("KMS: Failed to initialize crtc\n");
3315 dm->display_indexes_num = dm->dc->caps.max_streams;
3317 /* loops over all connectors on the board */
3318 for (i = 0; i < link_cnt; i++) {
3319 struct dc_link *link = NULL;
3321 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3323 "KMS: Cannot support more than %d display indexes\n",
3324 AMDGPU_DM_MAX_DISPLAY_INDEX);
3328 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3332 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3336 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3337 DRM_ERROR("KMS: Failed to initialize encoder\n");
3341 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3342 DRM_ERROR("KMS: Failed to initialize connector\n");
3346 link = dc_get_link_at_index(dm->dc, i);
3348 if (!dc_link_detect_sink(link, &new_connection_type))
3349 DRM_ERROR("KMS: Failed to detect connector\n");
3351 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3352 emulated_link_detect(link);
3353 amdgpu_dm_update_connector_after_detect(aconnector);
3355 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3356 amdgpu_dm_update_connector_after_detect(aconnector);
3357 register_backlight_device(dm, link);
3358 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3359 amdgpu_dm_set_psr_caps(link);
3365 /* Software is initialized. Now we can register interrupt handlers. */
3366 switch (adev->asic_type) {
3367 #if defined(CONFIG_DRM_AMD_DC_SI)
3372 if (dce60_register_irq_handlers(dm->adev)) {
3373 DRM_ERROR("DM: Failed to initialize IRQ\n");
3387 case CHIP_POLARIS11:
3388 case CHIP_POLARIS10:
3389 case CHIP_POLARIS12:
3394 if (dce110_register_irq_handlers(dm->adev)) {
3395 DRM_ERROR("DM: Failed to initialize IRQ\n");
3399 #if defined(CONFIG_DRM_AMD_DC_DCN)
3405 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3406 case CHIP_SIENNA_CICHLID:
3407 case CHIP_NAVY_FLOUNDER:
3409 if (dcn10_register_irq_handlers(dm->adev)) {
3410 DRM_ERROR("DM: Failed to initialize IRQ\n");
3416 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3428 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3430 drm_mode_config_cleanup(dm->ddev);
3431 drm_atomic_private_obj_fini(&dm->atomic_obj);
3435 /******************************************************************************
3436 * amdgpu_display_funcs functions
3437 *****************************************************************************/
3440 * dm_bandwidth_update - program display watermarks
3442 * @adev: amdgpu_device pointer
3444 * Calculate and program the display watermarks and line buffer allocation.
3446 static void dm_bandwidth_update(struct amdgpu_device *adev)
3448 /* TODO: implement later */
3451 static const struct amdgpu_display_funcs dm_display_funcs = {
3452 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3453 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3454 .backlight_set_level = NULL, /* never called for DC */
3455 .backlight_get_level = NULL, /* never called for DC */
3456 .hpd_sense = NULL,/* called unconditionally */
3457 .hpd_set_polarity = NULL, /* called unconditionally */
3458 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3459 .page_flip_get_scanoutpos =
3460 dm_crtc_get_scanoutpos,/* called unconditionally */
3461 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3462 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3465 #if defined(CONFIG_DEBUG_KERNEL_DC)
3467 static ssize_t s3_debug_store(struct device *device,
3468 struct device_attribute *attr,
3474 struct drm_device *drm_dev = dev_get_drvdata(device);
3475 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3477 ret = kstrtoint(buf, 0, &s3_state);
3482 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3487 return ret == 0 ? count : 0;
3490 DEVICE_ATTR_WO(s3_debug);
3494 static int dm_early_init(void *handle)
3496 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3498 switch (adev->asic_type) {
3499 #if defined(CONFIG_DRM_AMD_DC_SI)
3503 adev->mode_info.num_crtc = 6;
3504 adev->mode_info.num_hpd = 6;
3505 adev->mode_info.num_dig = 6;
3508 adev->mode_info.num_crtc = 2;
3509 adev->mode_info.num_hpd = 2;
3510 adev->mode_info.num_dig = 2;
3515 adev->mode_info.num_crtc = 6;
3516 adev->mode_info.num_hpd = 6;
3517 adev->mode_info.num_dig = 6;
3520 adev->mode_info.num_crtc = 4;
3521 adev->mode_info.num_hpd = 6;
3522 adev->mode_info.num_dig = 7;
3526 adev->mode_info.num_crtc = 2;
3527 adev->mode_info.num_hpd = 6;
3528 adev->mode_info.num_dig = 6;
3532 adev->mode_info.num_crtc = 6;
3533 adev->mode_info.num_hpd = 6;
3534 adev->mode_info.num_dig = 7;
3537 adev->mode_info.num_crtc = 3;
3538 adev->mode_info.num_hpd = 6;
3539 adev->mode_info.num_dig = 9;
3542 adev->mode_info.num_crtc = 2;
3543 adev->mode_info.num_hpd = 6;
3544 adev->mode_info.num_dig = 9;
3546 case CHIP_POLARIS11:
3547 case CHIP_POLARIS12:
3548 adev->mode_info.num_crtc = 5;
3549 adev->mode_info.num_hpd = 5;
3550 adev->mode_info.num_dig = 5;
3552 case CHIP_POLARIS10:
3554 adev->mode_info.num_crtc = 6;
3555 adev->mode_info.num_hpd = 6;
3556 adev->mode_info.num_dig = 6;
3561 adev->mode_info.num_crtc = 6;
3562 adev->mode_info.num_hpd = 6;
3563 adev->mode_info.num_dig = 6;
3565 #if defined(CONFIG_DRM_AMD_DC_DCN)
3567 adev->mode_info.num_crtc = 4;
3568 adev->mode_info.num_hpd = 4;
3569 adev->mode_info.num_dig = 4;
3574 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3575 case CHIP_SIENNA_CICHLID:
3576 case CHIP_NAVY_FLOUNDER:
3578 adev->mode_info.num_crtc = 6;
3579 adev->mode_info.num_hpd = 6;
3580 adev->mode_info.num_dig = 6;
3583 adev->mode_info.num_crtc = 5;
3584 adev->mode_info.num_hpd = 5;
3585 adev->mode_info.num_dig = 5;
3588 adev->mode_info.num_crtc = 4;
3589 adev->mode_info.num_hpd = 4;
3590 adev->mode_info.num_dig = 4;
3593 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3597 amdgpu_dm_set_irq_funcs(adev);
3599 if (adev->mode_info.funcs == NULL)
3600 adev->mode_info.funcs = &dm_display_funcs;
3603 * Note: Do NOT change adev->audio_endpt_rreg and
3604 * adev->audio_endpt_wreg because they are initialised in
3605 * amdgpu_device_init()
3607 #if defined(CONFIG_DEBUG_KERNEL_DC)
3609 adev_to_drm(adev)->dev,
3610 &dev_attr_s3_debug);
3616 static bool modeset_required(struct drm_crtc_state *crtc_state,
3617 struct dc_stream_state *new_stream,
3618 struct dc_stream_state *old_stream)
3620 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3623 static bool modereset_required(struct drm_crtc_state *crtc_state)
3625 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3628 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3630 drm_encoder_cleanup(encoder);
3634 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3635 .destroy = amdgpu_dm_encoder_destroy,
3639 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3640 struct dc_scaling_info *scaling_info)
3642 int scale_w, scale_h;
3644 memset(scaling_info, 0, sizeof(*scaling_info));
3646 /* Source is in fixed-point 16.16, but we ignore the fractional part for now... */
3647 scaling_info->src_rect.x = state->src_x >> 16;
3648 scaling_info->src_rect.y = state->src_y >> 16;
3650 scaling_info->src_rect.width = state->src_w >> 16;
3651 if (scaling_info->src_rect.width == 0)
3654 scaling_info->src_rect.height = state->src_h >> 16;
3655 if (scaling_info->src_rect.height == 0)
3658 scaling_info->dst_rect.x = state->crtc_x;
3659 scaling_info->dst_rect.y = state->crtc_y;
3661 if (state->crtc_w == 0)
3664 scaling_info->dst_rect.width = state->crtc_w;
3666 if (state->crtc_h == 0)
3669 scaling_info->dst_rect.height = state->crtc_h;
3671 /* DRM doesn't specify clipping on destination output. */
3672 scaling_info->clip_rect = scaling_info->dst_rect;
3674 /* TODO: Validate scaling per-format with DC plane caps */
3675 scale_w = scaling_info->dst_rect.width * 1000 /
3676 scaling_info->src_rect.width;
3678 if (scale_w < 250 || scale_w > 16000)
3681 scale_h = scaling_info->dst_rect.height * 1000 /
3682 scaling_info->src_rect.height;
3684 if (scale_h < 250 || scale_h > 16000)
3688 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3689 * assume reasonable defaults based on the format.
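/*
 * Example (a sketch): a 1920-wide source scaled to a 960-wide
 * destination gives scale_w = 960 * 1000 / 1920 = 500, i.e. 0.5x,
 * inside the supported 0.25x..16x (250..16000) window.
 */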
3695 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3696 uint64_t *tiling_flags, bool *tmz_surface)
3698 struct amdgpu_bo *rbo;
3703 *tmz_surface = false;
3707 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3708 r = amdgpu_bo_reserve(rbo, false);
3711 /* Don't show error message when returning -ERESTARTSYS */
3712 if (r != -ERESTARTSYS)
3713 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3718 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3721 *tmz_surface = amdgpu_bo_encrypted(rbo);
3723 amdgpu_bo_unreserve(rbo);
3728 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3730 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3732 return offset ? (address + offset * 256) : 0;
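/*
 * Example (a sketch): a DCC_OFFSET_256B value of 4096 places the DCC
 * metadata at address + 4096 * 256, i.e. 1 MiB past the surface base.
 */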
3736 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3737 const struct amdgpu_framebuffer *afb,
3738 const enum surface_pixel_format format,
3739 const enum dc_rotation_angle rotation,
3740 const struct plane_size *plane_size,
3741 const union dc_tiling_info *tiling_info,
3742 const uint64_t info,
3743 struct dc_plane_dcc_param *dcc,
3744 struct dc_plane_address *address,
3745 bool force_disable_dcc)
3747 struct dc *dc = adev->dm.dc;
3748 struct dc_dcc_surface_param input;
3749 struct dc_surface_dcc_cap output;
3750 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3751 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3752 uint64_t dcc_address;
3754 memset(&input, 0, sizeof(input));
3755 memset(&output, 0, sizeof(output));
3757 if (force_disable_dcc)
3763 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3766 if (!dc->cap_funcs.get_dcc_compression_cap)
3769 input.format = format;
3770 input.surface_size.width = plane_size->surface_size.width;
3771 input.surface_size.height = plane_size->surface_size.height;
3772 input.swizzle_mode = tiling_info->gfx9.swizzle;
3774 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3775 input.scan = SCAN_DIRECTION_HORIZONTAL;
3776 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3777 input.scan = SCAN_DIRECTION_VERTICAL;
3779 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3782 if (!output.capable)
3785 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3790 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3791 dcc->independent_64b_blks = i64b;
3793 dcc_address = get_dcc_address(afb->address, info);
3794 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3795 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3801 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3802 const struct amdgpu_framebuffer *afb,
3803 const enum surface_pixel_format format,
3804 const enum dc_rotation_angle rotation,
3805 const uint64_t tiling_flags,
3806 union dc_tiling_info *tiling_info,
3807 struct plane_size *plane_size,
3808 struct dc_plane_dcc_param *dcc,
3809 struct dc_plane_address *address,
3811 bool force_disable_dcc)
3813 const struct drm_framebuffer *fb = &afb->base;
3816 memset(tiling_info, 0, sizeof(*tiling_info));
3817 memset(plane_size, 0, sizeof(*plane_size));
3818 memset(dcc, 0, sizeof(*dcc));
3819 memset(address, 0, sizeof(*address));
3821 address->tmz_surface = tmz_surface;
3823 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3824 plane_size->surface_size.x = 0;
3825 plane_size->surface_size.y = 0;
3826 plane_size->surface_size.width = fb->width;
3827 plane_size->surface_size.height = fb->height;
3828 plane_size->surface_pitch =
3829 fb->pitches[0] / fb->format->cpp[0];
3831 address->type = PLN_ADDR_TYPE_GRAPHICS;
3832 address->grph.addr.low_part = lower_32_bits(afb->address);
3833 address->grph.addr.high_part = upper_32_bits(afb->address);
3834 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3835 uint64_t chroma_addr = afb->address + fb->offsets[1];
3837 plane_size->surface_size.x = 0;
3838 plane_size->surface_size.y = 0;
3839 plane_size->surface_size.width = fb->width;
3840 plane_size->surface_size.height = fb->height;
3841 plane_size->surface_pitch =
3842 fb->pitches[0] / fb->format->cpp[0];
3844 plane_size->chroma_size.x = 0;
3845 plane_size->chroma_size.y = 0;
3846 /* TODO: set these based on surface format */
3847 plane_size->chroma_size.width = fb->width / 2;
3848 plane_size->chroma_size.height = fb->height / 2;
3850 plane_size->chroma_pitch =
3851 fb->pitches[1] / fb->format->cpp[1];
3853 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3854 address->video_progressive.luma_addr.low_part =
3855 lower_32_bits(afb->address);
3856 address->video_progressive.luma_addr.high_part =
3857 upper_32_bits(afb->address);
3858 address->video_progressive.chroma_addr.low_part =
3859 lower_32_bits(chroma_addr);
3860 address->video_progressive.chroma_addr.high_part =
3861 upper_32_bits(chroma_addr);
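/*
 * Example (a sketch): for a 1920x1080 NV12 framebuffer, the luma plane
 * is 1920x1080 with pitch fb->pitches[0], and the interleaved CbCr
 * plane is 960x540 starting at afb->address + fb->offsets[1].
 */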
3864 /* Fill GFX8 params */
3865 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3866 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3868 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3869 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3870 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3871 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3872 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3874 /* XXX fix me for VI */
3875 tiling_info->gfx8.num_banks = num_banks;
3876 tiling_info->gfx8.array_mode =
3877 DC_ARRAY_2D_TILED_THIN1;
3878 tiling_info->gfx8.tile_split = tile_split;
3879 tiling_info->gfx8.bank_width = bankw;
3880 tiling_info->gfx8.bank_height = bankh;
3881 tiling_info->gfx8.tile_aspect = mtaspect;
3882 tiling_info->gfx8.tile_mode =
3883 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3884 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3885 == DC_ARRAY_1D_TILED_THIN1) {
3886 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3889 tiling_info->gfx8.pipe_config =
3890 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3892 if (adev->asic_type == CHIP_VEGA10 ||
3893 adev->asic_type == CHIP_VEGA12 ||
3894 adev->asic_type == CHIP_VEGA20 ||
3895 adev->asic_type == CHIP_NAVI10 ||
3896 adev->asic_type == CHIP_NAVI14 ||
3897 adev->asic_type == CHIP_NAVI12 ||
3898 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3899 adev->asic_type == CHIP_SIENNA_CICHLID ||
3900 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3902 adev->asic_type == CHIP_RENOIR ||
3903 adev->asic_type == CHIP_RAVEN) {
3904 /* Fill GFX9 params */
3905 tiling_info->gfx9.num_pipes =
3906 adev->gfx.config.gb_addr_config_fields.num_pipes;
3907 tiling_info->gfx9.num_banks =
3908 adev->gfx.config.gb_addr_config_fields.num_banks;
3909 tiling_info->gfx9.pipe_interleave =
3910 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3911 tiling_info->gfx9.num_shader_engines =
3912 adev->gfx.config.gb_addr_config_fields.num_se;
3913 tiling_info->gfx9.max_compressed_frags =
3914 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3915 tiling_info->gfx9.num_rb_per_se =
3916 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3917 tiling_info->gfx9.swizzle =
3918 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3919 tiling_info->gfx9.shaderEnable = 1;
3921 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3922 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3923 adev->asic_type == CHIP_NAVY_FLOUNDER)
3924 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3926 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3927 plane_size, tiling_info,
3928 tiling_flags, dcc, address,
3938 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3939 bool *per_pixel_alpha, bool *global_alpha,
3940 int *global_alpha_value)
3942 *per_pixel_alpha = false;
3943 *global_alpha = false;
3944 *global_alpha_value = 0xff;
3946 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3949 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3950 static const uint32_t alpha_formats[] = {
3951 DRM_FORMAT_ARGB8888,
3952 DRM_FORMAT_RGBA8888,
3953 DRM_FORMAT_ABGR8888,
3955 uint32_t format = plane_state->fb->format->format;
3958 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3959 if (format == alpha_formats[i]) {
3960 *per_pixel_alpha = true;
3966 if (plane_state->alpha < 0xffff) {
3967 *global_alpha = true;
3968 *global_alpha_value = plane_state->alpha >> 8;
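/*
 * Example (a sketch): an overlay plane scanning out DRM_FORMAT_ARGB8888
 * with DRM_MODE_BLEND_PREMULTI gets per_pixel_alpha = true; if its
 * plane-alpha property is 0x8000 (~50%), global_alpha is enabled too,
 * with global_alpha_value = 0x8000 >> 8 = 0x80.
 */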
3973 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3974 const enum surface_pixel_format format,
3975 enum dc_color_space *color_space)
3979 *color_space = COLOR_SPACE_SRGB;
3981 /* DRM color properties only affect non-RGB formats. */
3982 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3985 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3987 switch (plane_state->color_encoding) {
3988 case DRM_COLOR_YCBCR_BT601:
3990 *color_space = COLOR_SPACE_YCBCR601;
3992 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3995 case DRM_COLOR_YCBCR_BT709:
3997 *color_space = COLOR_SPACE_YCBCR709;
3999 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4002 case DRM_COLOR_YCBCR_BT2020:
4004 *color_space = COLOR_SPACE_2020_YCBCR;
4017 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4018 const struct drm_plane_state *plane_state,
4019 const uint64_t tiling_flags,
4020 struct dc_plane_info *plane_info,
4021 struct dc_plane_address *address,
4023 bool force_disable_dcc)
4025 const struct drm_framebuffer *fb = plane_state->fb;
4026 const struct amdgpu_framebuffer *afb =
4027 to_amdgpu_framebuffer(plane_state->fb);
4028 struct drm_format_name_buf format_name;
4031 memset(plane_info, 0, sizeof(*plane_info));
4033 switch (fb->format->format) {
4035 plane_info->format =
4036 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4038 case DRM_FORMAT_RGB565:
4039 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4041 case DRM_FORMAT_XRGB8888:
4042 case DRM_FORMAT_ARGB8888:
4043 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4045 case DRM_FORMAT_XRGB2101010:
4046 case DRM_FORMAT_ARGB2101010:
4047 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4049 case DRM_FORMAT_XBGR2101010:
4050 case DRM_FORMAT_ABGR2101010:
4051 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4053 case DRM_FORMAT_XBGR8888:
4054 case DRM_FORMAT_ABGR8888:
4055 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4057 case DRM_FORMAT_NV21:
4058 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4060 case DRM_FORMAT_NV12:
4061 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4063 case DRM_FORMAT_P010:
4064 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4066 case DRM_FORMAT_XRGB16161616F:
4067 case DRM_FORMAT_ARGB16161616F:
4068 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4070 case DRM_FORMAT_XBGR16161616F:
4071 case DRM_FORMAT_ABGR16161616F:
4072 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4076 "Unsupported screen format %s\n",
4077 drm_get_format_name(fb->format->format, &format_name));
4081 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4082 case DRM_MODE_ROTATE_0:
4083 plane_info->rotation = ROTATION_ANGLE_0;
4085 case DRM_MODE_ROTATE_90:
4086 plane_info->rotation = ROTATION_ANGLE_90;
4088 case DRM_MODE_ROTATE_180:
4089 plane_info->rotation = ROTATION_ANGLE_180;
4091 case DRM_MODE_ROTATE_270:
4092 plane_info->rotation = ROTATION_ANGLE_270;
4095 plane_info->rotation = ROTATION_ANGLE_0;
4099 plane_info->visible = true;
4100 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4102 plane_info->layer_index = 0;
4104 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4105 &plane_info->color_space);
4109 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4110 plane_info->rotation, tiling_flags,
4111 &plane_info->tiling_info,
4112 &plane_info->plane_size,
4113 &plane_info->dcc, address, tmz_surface,
4118 fill_blending_from_plane_state(
4119 plane_state, &plane_info->per_pixel_alpha,
4120 &plane_info->global_alpha, &plane_info->global_alpha_value);
4125 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4126 struct dc_plane_state *dc_plane_state,
4127 struct drm_plane_state *plane_state,
4128 struct drm_crtc_state *crtc_state)
4130 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4131 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4132 struct dc_scaling_info scaling_info;
4133 struct dc_plane_info plane_info;
4135 bool force_disable_dcc = false;
4137 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4141 dc_plane_state->src_rect = scaling_info.src_rect;
4142 dc_plane_state->dst_rect = scaling_info.dst_rect;
4143 dc_plane_state->clip_rect = scaling_info.clip_rect;
4144 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4146 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4147 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4148 dm_plane_state->tiling_flags,
4150 &dc_plane_state->address,
4151 dm_plane_state->tmz_surface,
4156 dc_plane_state->format = plane_info.format;
4157 dc_plane_state->color_space = plane_info.color_space;
4159 dc_plane_state->plane_size = plane_info.plane_size;
4160 dc_plane_state->rotation = plane_info.rotation;
4161 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4162 dc_plane_state->stereo_format = plane_info.stereo_format;
4163 dc_plane_state->tiling_info = plane_info.tiling_info;
4164 dc_plane_state->visible = plane_info.visible;
4165 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4166 dc_plane_state->global_alpha = plane_info.global_alpha;
4167 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4168 dc_plane_state->dcc = plane_info.dcc;
4169 dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4172 * Always set input transfer function, since plane state is refreshed
4175 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4182 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4183 const struct dm_connector_state *dm_state,
4184 struct dc_stream_state *stream)
4186 enum amdgpu_rmx_type rmx_type;
4188 struct rect src = { 0 }; /* viewport in composition space*/
4189 struct rect dst = { 0 }; /* stream addressable area */
4191 /* no mode. nothing to be done */
4195 /* Full screen scaling by default */
4196 src.width = mode->hdisplay;
4197 src.height = mode->vdisplay;
4198 dst.width = stream->timing.h_addressable;
4199 dst.height = stream->timing.v_addressable;
4202 rmx_type = dm_state->scaling;
4203 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4204 if (src.width * dst.height <
4205 src.height * dst.width) {
4206 /* height needs less upscaling/more downscaling */
4207 dst.width = src.width *
4208 dst.height / src.height;
4210 /* width needs less upscaling/more downscaling */
4211 dst.height = src.height *
4212 dst.width / src.width;
4214 } else if (rmx_type == RMX_CENTER) {
4218 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4219 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4221 if (dm_state->underscan_enable) {
4222 dst.x += dm_state->underscan_hborder / 2;
4223 dst.y += dm_state->underscan_vborder / 2;
4224 dst.width -= dm_state->underscan_hborder;
4225 dst.height -= dm_state->underscan_vborder;
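/*
 * Example (a sketch): a 1280x720 mode on a 1920x1200 panel with
 * RMX_ASPECT keeps 16:9, so dst.height = 720 * 1920 / 1280 = 1080 and
 * the 1920x1080 rectangle is centered at dst.y = (1200 - 1080) / 2 = 60.
 */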
4232 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4233 dst.x, dst.y, dst.width, dst.height);
4237 static enum dc_color_depth
4238 convert_color_depth_from_display_info(const struct drm_connector *connector,
4239 bool is_y420, int requested_bpc)
4246 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4247 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4249 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4251 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4254 bpc = (uint8_t)connector->display_info.bpc;
4255 /* Assume 8 bpc by default if no bpc is specified. */
4256 bpc = bpc ? bpc : 8;
4259 if (requested_bpc > 0) {
4261 * Cap display bpc based on the user requested value.
4263 * The value for state->max_bpc may not be correctly updated
4264 * depending on when the connector gets added to the state
4265 * or if this was called outside of atomic check, so it
4266 * can't be used directly.
4268 bpc = min_t(u8, bpc, requested_bpc);
4270 /* Round down to the nearest even number. */
4271 bpc = bpc - (bpc & 1);
4273 switch (bpc) {
4274 case 0:
4275 /*
4276 * Temporary workaround: DRM doesn't parse color depth for
4277 * EDID revisions before 1.4.
4278 * TODO: Fix EDID parsing.
4279 */
4280 return COLOR_DEPTH_888;
4281 case 6:
4282 return COLOR_DEPTH_666;
4283 case 8:
4284 return COLOR_DEPTH_888;
4285 case 10:
4286 return COLOR_DEPTH_101010;
4287 case 12:
4288 return COLOR_DEPTH_121212;
4289 case 14:
4290 return COLOR_DEPTH_141414;
4291 case 16:
4292 return COLOR_DEPTH_161616;
4293 default:
4294 return COLOR_DEPTH_UNDEFINED;
4295 }
4296 }
4299 static enum dc_aspect_ratio
4300 get_aspect_ratio(const struct drm_display_mode *mode_in)
4302 /* 1-1 mapping, since both enums follow the HDMI spec. */
4303 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4306 static enum dc_color_space
4307 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4309 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4311 switch (dc_crtc_timing->pixel_encoding) {
4312 case PIXEL_ENCODING_YCBCR422:
4313 case PIXEL_ENCODING_YCBCR444:
4314 case PIXEL_ENCODING_YCBCR420:
4315 {
4316 /*
4317 * 27030khz (27.03 MHz) is the separation point between HDTV and SDTV;
4318 * per the HDMI spec, we use YCbCr709 above it and YCbCr601 below it.
4319 */
4320 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4321 if (dc_crtc_timing->flags.Y_ONLY)
4322 color_space =
4323 COLOR_SPACE_YCBCR709_LIMITED;
4324 else
4325 color_space = COLOR_SPACE_YCBCR709;
4326 } else {
4327 if (dc_crtc_timing->flags.Y_ONLY)
4328 color_space =
4329 COLOR_SPACE_YCBCR601_LIMITED;
4330 else
4331 color_space = COLOR_SPACE_YCBCR601;
4332 }
4333 }
4334 break;
4337 case PIXEL_ENCODING_RGB:
4338 color_space = COLOR_SPACE_SRGB;
4339 break;
4340 default:
4341 WARN_ON(1);
4342 break;
4343 }
4345 return color_space;
4346 }
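4348 /* Below: raise the HDMI pixel clock per colour depth, dropping depth until it fits the sink's max TMDS clock. */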
4349 static bool adjust_colour_depth_from_display_info(
4350 struct dc_crtc_timing *timing_out,
4351 const struct drm_display_info *info)
4352 {
4353 enum dc_color_depth depth = timing_out->display_color_depth;
4354 int normalized_clk;
4355 do {
4356 normalized_clk = timing_out->pix_clk_100hz / 10;
4357 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4358 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4359 normalized_clk /= 2;
4360 /* Adjust the pixel clock per the HDMI spec, based on colour depth. */
4361 switch (depth) {
4362 case COLOR_DEPTH_888:
4363 break;
4364 case COLOR_DEPTH_101010:
4365 normalized_clk = (normalized_clk * 30) / 24;
4366 break;
4367 case COLOR_DEPTH_121212:
4368 normalized_clk = (normalized_clk * 36) / 24;
4369 break;
4370 case COLOR_DEPTH_161616:
4371 normalized_clk = (normalized_clk * 48) / 24;
4372 break;
4373 default:
4374 /* The above depths are the only ones valid for HDMI. */
4375 return false;
4376 }
4377 if (normalized_clk <= info->max_tmds_clock) {
4378 timing_out->display_color_depth = depth;
4379 return true;
4380 }
4381 } while (--depth > COLOR_DEPTH_666);
4382 return false;
4383 }
4385 static void fill_stream_properties_from_drm_display_mode(
4386 struct dc_stream_state *stream,
4387 const struct drm_display_mode *mode_in,
4388 const struct drm_connector *connector,
4389 const struct drm_connector_state *connector_state,
4390 const struct dc_stream_state *old_stream,
4391 int requested_bpc)
4392 {
4393 struct dc_crtc_timing *timing_out = &stream->timing;
4394 const struct drm_display_info *info = &connector->display_info;
4395 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4396 struct hdmi_vendor_infoframe hv_frame;
4397 struct hdmi_avi_infoframe avi_frame;
4399 memset(&hv_frame, 0, sizeof(hv_frame));
4400 memset(&avi_frame, 0, sizeof(avi_frame));
4402 timing_out->h_border_left = 0;
4403 timing_out->h_border_right = 0;
4404 timing_out->v_border_top = 0;
4405 timing_out->v_border_bottom = 0;
4406 /* TODO: un-hardcode */
4407 if (drm_mode_is_420_only(info, mode_in)
4408 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4409 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4410 else if (drm_mode_is_420_also(info, mode_in)
4411 && aconnector->force_yuv420_output)
4412 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4413 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4414 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4415 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4417 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4419 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4420 timing_out->display_color_depth = convert_color_depth_from_display_info(
4421 connector,
4422 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4423 requested_bpc);
4424 timing_out->scan_type = SCANNING_TYPE_NODATA;
4425 timing_out->hdmi_vic = 0;
4427 if (old_stream) {
4428 timing_out->vic = old_stream->timing.vic;
4429 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4430 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4431 } else {
4432 timing_out->vic = drm_match_cea_mode(mode_in);
4433 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4434 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4435 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4436 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4437 }
4439 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4440 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4441 timing_out->vic = avi_frame.video_code;
4442 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4443 timing_out->hdmi_vic = hv_frame.vic;
4446 timing_out->h_addressable = mode_in->crtc_hdisplay;
4447 timing_out->h_total = mode_in->crtc_htotal;
4448 timing_out->h_sync_width =
4449 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4450 timing_out->h_front_porch =
4451 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4452 timing_out->v_total = mode_in->crtc_vtotal;
4453 timing_out->v_addressable = mode_in->crtc_vdisplay;
4454 timing_out->v_front_porch =
4455 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4456 timing_out->v_sync_width =
4457 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4458 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; /* crtc_clock is in kHz; DC wants 100 Hz units */
4459 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4461 stream->output_color_space = get_output_color_space(timing_out);
4463 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4464 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4465 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4466 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4467 drm_mode_is_420_also(info, mode_in) &&
4468 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4469 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4470 adjust_colour_depth_from_display_info(timing_out, info);
4471 }
4472 }
4473 }
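4474 /* Rationale: YCbCr 4:2:0 halves the effective TMDS rate, so retrying as 4:2:0 can make an otherwise-too-fast mode fit. */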
4475 static void fill_audio_info(struct audio_info *audio_info,
4476 const struct drm_connector *drm_connector,
4477 const struct dc_sink *dc_sink)
4480 int cea_revision = 0;
4481 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4483 audio_info->manufacture_id = edid_caps->manufacturer_id;
4484 audio_info->product_id = edid_caps->product_id;
4486 cea_revision = drm_connector->display_info.cea_rev;
4488 strscpy(audio_info->display_name,
4489 edid_caps->display_name,
4490 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4492 if (cea_revision >= 3) {
4493 audio_info->mode_count = edid_caps->audio_mode_count;
4495 for (i = 0; i < audio_info->mode_count; ++i) {
4496 audio_info->modes[i].format_code =
4497 (enum audio_format_code)
4498 (edid_caps->audio_modes[i].format_code);
4499 audio_info->modes[i].channel_count =
4500 edid_caps->audio_modes[i].channel_count;
4501 audio_info->modes[i].sample_rates.all =
4502 edid_caps->audio_modes[i].sample_rate;
4503 audio_info->modes[i].sample_size =
4504 edid_caps->audio_modes[i].sample_size;
4505 }
4506 }
4508 audio_info->flags.all = edid_caps->speaker_flags;
4510 /* TODO: We only check for the progressive mode, check for interlace mode too */
4511 if (drm_connector->latency_present[0]) {
4512 audio_info->video_latency = drm_connector->video_latency[0];
4513 audio_info->audio_latency = drm_connector->audio_latency[0];
4516 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4520 static void
4521 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4522 struct drm_display_mode *dst_mode)
4523 {
4524 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4525 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4526 dst_mode->crtc_clock = src_mode->crtc_clock;
4527 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4528 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4529 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4530 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4531 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4532 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4533 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4534 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4535 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4536 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4537 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4538 }
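4539 /* Below: reuse the native mode's CRTC timing when scaling is enabled or the mode already matches it. */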
4540 static void
4541 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4542 const struct drm_display_mode *native_mode,
4543 bool scale_enabled)
4544 {
4545 if (scale_enabled) {
4546 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4547 } else if (native_mode->clock == drm_mode->clock &&
4548 native_mode->htotal == drm_mode->htotal &&
4549 native_mode->vtotal == drm_mode->vtotal) {
4550 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4551 } else {
4552 /* no scaling and no amdgpu-inserted mode; nothing to patch */
4553 }
4554 }
4556 static struct dc_sink *
4557 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4559 struct dc_sink_init_data sink_init_data = { 0 };
4560 struct dc_sink *sink = NULL;
4561 sink_init_data.link = aconnector->dc_link;
4562 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4564 sink = dc_sink_create(&sink_init_data);
4565 if (!sink) {
4566 DRM_ERROR("Failed to create sink!\n");
4567 return NULL;
4568 }
4569 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4571 return sink;
4572 }
4574 static void set_multisync_trigger_params(
4575 struct dc_stream_state *stream)
4577 if (stream->triggered_crtc_reset.enabled) {
4578 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4579 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4580 }
4581 }
4583 static void set_master_stream(struct dc_stream_state *stream_set[],
4584 int stream_count)
4585 {
4586 int j, highest_rfr = 0, master_stream = 0;
4588 for (j = 0; j < stream_count; j++) {
4589 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4590 int refresh_rate = 0;
4592 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4593 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4594 if (refresh_rate > highest_rfr) {
4595 highest_rfr = refresh_rate;
4596 master_stream = j;
4597 }
4598 }
4599 }
4600 for (j = 0; j < stream_count; j++) {
4601 if (stream_set[j] && j != master_stream && stream_set[j]->triggered_crtc_reset.enabled)
4602 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4603 }
4604 }
4606 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4607 {
4608 int i;
4610 if (context->stream_count < 2)
4611 return;
4612 for (i = 0; i < context->stream_count ; i++) {
4613 if (!context->streams[i])
4614 continue;
4615 /*
4616 * TODO: add a function to read AMD VSDB bits and set
4617 * crtc_sync_master.multi_sync_enabled flag.
4618 * For now it's set to false.
4619 */
4620 set_multisync_trigger_params(context->streams[i]);
4621 }
4622 set_master_stream(context->streams, context->stream_count);
4623 }
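4624 /* Multisync, as set up above, resets secondary CRTCs on the VSync of the highest-refresh ("master") stream. */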
4625 static struct dc_stream_state *
4626 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4627 const struct drm_display_mode *drm_mode,
4628 const struct dm_connector_state *dm_state,
4629 const struct dc_stream_state *old_stream,
4630 int requested_bpc)
4631 {
4632 struct drm_display_mode *preferred_mode = NULL;
4633 struct drm_connector *drm_connector;
4634 const struct drm_connector_state *con_state =
4635 dm_state ? &dm_state->base : NULL;
4636 struct dc_stream_state *stream = NULL;
4637 struct drm_display_mode mode = *drm_mode;
4638 bool native_mode_found = false;
4639 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4640 int mode_refresh;
4641 int preferred_refresh = 0;
4642 #if defined(CONFIG_DRM_AMD_DC_DCN)
4643 struct dsc_dec_dpcd_caps dsc_caps;
4644 #endif
4645 uint32_t link_bandwidth_kbps;
4647 struct dc_sink *sink = NULL;
4648 if (aconnector == NULL) {
4649 DRM_ERROR("aconnector is NULL!\n");
4653 drm_connector = &aconnector->base;
4655 if (!aconnector->dc_sink) {
4656 sink = create_fake_sink(aconnector);
4657 if (!sink)
4658 return stream;
4659 } else {
4660 sink = aconnector->dc_sink;
4661 dc_sink_retain(sink);
4664 stream = dc_create_stream_for_sink(sink);
4666 if (stream == NULL) {
4667 DRM_ERROR("Failed to create stream for sink!\n");
4671 stream->dm_stream_context = aconnector;
4673 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4674 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4676 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4677 /* Search for preferred mode */
4678 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4679 native_mode_found = true;
4680 break;
4681 }
4682 }
4683 if (!native_mode_found)
4684 preferred_mode = list_first_entry_or_null(
4685 &aconnector->base.modes,
4686 struct drm_display_mode,
4687 head);
4689 mode_refresh = drm_mode_vrefresh(&mode);
4691 if (preferred_mode == NULL) {
4692 /*
4693 * This may not be an error, the use case is when we have no
4694 * usermode calls to reset and set mode upon hotplug. In this
4695 * case, we call set mode ourselves to restore the previous mode
4696 * and the modelist may not be filled in time.
4697 */
4698 DRM_DEBUG_DRIVER("No preferred mode found\n");
4700 decide_crtc_timing_for_drm_display_mode(
4701 &mode, preferred_mode,
4702 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4703 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4706 if (!dm_state)
4707 drm_mode_set_crtcinfo(&mode, 0);
4709 /*
4710 * If scaling is enabled and the refresh rate didn't change,
4711 * we copy the vic and polarities of the old timings.
4712 */
4713 if (!scale || mode_refresh != preferred_refresh)
4714 fill_stream_properties_from_drm_display_mode(stream,
4715 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4717 fill_stream_properties_from_drm_display_mode(stream,
4718 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4720 stream->timing.flags.DSC = 0;
4722 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4723 #if defined(CONFIG_DRM_AMD_DC_DCN)
4724 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4725 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4726 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4727 &dsc_caps);
4728 #endif
4729 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4730 dc_link_get_link_cap(aconnector->dc_link));
4732 #if defined(CONFIG_DRM_AMD_DC_DCN)
4733 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4734 /* Set DSC policy according to dsc_clock_en */
4735 dc_dsc_policy_set_enable_dsc_when_not_needed(
4736 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4738 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4739 &dsc_caps,
4740 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4741 link_bandwidth_kbps,
4742 &stream->timing,
4743 &stream->timing.dsc_cfg))
4744 stream->timing.flags.DSC = 1;
4745 /* Overwrite the stream flag if DSC is enabled through debugfs */
4746 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4747 stream->timing.flags.DSC = 1;
4749 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4750 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4752 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4753 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4755 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4756 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4757 }
4758 #endif
4759 }
4761 update_stream_scaling_settings(&mode, dm_state, stream);
4763 fill_audio_info(
4764 &stream->audio_info,
4765 drm_connector,
4766 sink);
4768 update_stream_signal(stream, sink);
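4769 /* HDMI sinks get an HF-VSIF packet; PSR-capable DP links additionally get a VSC SDP below. */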
4770 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4771 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4773 if (stream->link->psr_settings.psr_feature_enabled) {
4775 // should decide whether the stream supports VSC SDP colorimetry
4776 // before building the VSC info packet
4778 stream->use_vsc_sdp_for_colorimetry = false;
4779 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4780 stream->use_vsc_sdp_for_colorimetry =
4781 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4782 } else {
4783 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4784 stream->use_vsc_sdp_for_colorimetry = true;
4785 }
4786 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4787 }
4788 finish:
4789 dc_sink_release(sink);
4791 return stream;
4792 }
4794 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4795 {
4796 drm_crtc_cleanup(crtc);
4797 kfree(crtc);
4798 }
4800 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4801 struct drm_crtc_state *state)
4803 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4805 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4806 if (cur->stream)
4807 dc_stream_release(cur->stream);
4810 __drm_atomic_helper_crtc_destroy_state(state);
4813 kfree(state);
4814 }
4816 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4818 struct dm_crtc_state *state;
4820 if (crtc->state)
4821 dm_crtc_destroy_state(crtc, crtc->state);
4823 state = kzalloc(sizeof(*state), GFP_KERNEL);
4824 if (WARN_ON(!state))
4825 return;
4827 __drm_atomic_helper_crtc_reset(crtc, &state->base);
4828 }
4830 static struct drm_crtc_state *
4831 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4833 struct dm_crtc_state *state, *cur;
4835 cur = to_dm_crtc_state(crtc->state);
4837 if (WARN_ON(!crtc->state))
4838 return NULL;
4840 state = kzalloc(sizeof(*state), GFP_KERNEL);
4841 if (!state)
4842 return NULL;
4844 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4846 if (cur->stream) {
4847 state->stream = cur->stream;
4848 dc_stream_retain(state->stream);
4849 }
4851 state->active_planes = cur->active_planes;
4852 state->vrr_infopacket = cur->vrr_infopacket;
4853 state->abm_level = cur->abm_level;
4854 state->vrr_supported = cur->vrr_supported;
4855 state->freesync_config = cur->freesync_config;
4856 state->crc_src = cur->crc_src;
4857 state->cm_has_degamma = cur->cm_has_degamma;
4858 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4860 /* TODO: Duplicate dc_stream once the stream object is flattened */
4862 return &state->base;
4863 }
4865 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4867 enum dc_irq_source irq_source;
4868 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4869 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4870 int rc;
4872 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4874 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4876 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4877 acrtc->crtc_id, enable ? "en" : "dis", rc);
4881 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4883 enum dc_irq_source irq_source;
4884 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4885 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4886 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4887 int rc = 0;
4889 if (enable) {
4890 /* vblank irq on -> Only need vupdate irq in vrr mode */
4891 if (amdgpu_dm_vrr_active(acrtc_state))
4892 rc = dm_set_vupdate_irq(crtc, true);
4893 } else {
4894 /* vblank irq off -> vupdate irq off */
4895 rc = dm_set_vupdate_irq(crtc, false);
4896 }
4898 if (rc)
4899 return rc;
4901 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4902 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4905 static int dm_enable_vblank(struct drm_crtc *crtc)
4907 return dm_set_vblank(crtc, true);
4910 static void dm_disable_vblank(struct drm_crtc *crtc)
4912 dm_set_vblank(crtc, false);
4915 /* Only the options currently available to the driver are implemented */
4916 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4917 .reset = dm_crtc_reset_state,
4918 .destroy = amdgpu_dm_crtc_destroy,
4919 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4920 .set_config = drm_atomic_helper_set_config,
4921 .page_flip = drm_atomic_helper_page_flip,
4922 .atomic_duplicate_state = dm_crtc_duplicate_state,
4923 .atomic_destroy_state = dm_crtc_destroy_state,
4924 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4925 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4926 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4927 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4928 .enable_vblank = dm_enable_vblank,
4929 .disable_vblank = dm_disable_vblank,
4930 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4933 static enum drm_connector_status
4934 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4937 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4938 bool connected;
4940 /*
4941 * Notes:
4942 * 1. This interface is NOT called in context of HPD irq.
4943 * 2. This interface *is called* in context of user-mode ioctl, which
4944 * makes it a bad place for *any* MST-related activity.
4945 */
4946 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4947 !aconnector->fake_enable)
4948 connected = (aconnector->dc_sink != NULL);
4950 connected = (aconnector->base.force == DRM_FORCE_ON);
4952 update_subconnector_property(aconnector);
4954 return (connected ? connector_status_connected :
4955 connector_status_disconnected);
4958 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4959 struct drm_connector_state *connector_state,
4960 struct drm_property *property,
4963 struct drm_device *dev = connector->dev;
4964 struct amdgpu_device *adev = drm_to_adev(dev);
4965 struct dm_connector_state *dm_old_state =
4966 to_dm_connector_state(connector->state);
4967 struct dm_connector_state *dm_new_state =
4968 to_dm_connector_state(connector_state);
4970 int ret = false;
4972 if (property == dev->mode_config.scaling_mode_property) {
4973 enum amdgpu_rmx_type rmx_type;
4975 switch (val) {
4976 case DRM_MODE_SCALE_CENTER:
4977 rmx_type = RMX_CENTER;
4978 break;
4979 case DRM_MODE_SCALE_ASPECT:
4980 rmx_type = RMX_ASPECT;
4981 break;
4982 case DRM_MODE_SCALE_FULLSCREEN:
4983 rmx_type = RMX_FULL;
4984 break;
4985 case DRM_MODE_SCALE_NONE:
4986 default:
4987 rmx_type = RMX_OFF;
4988 break;
4989 }
4991 if (dm_old_state->scaling == rmx_type)
4992 return 0;
4994 dm_new_state->scaling = rmx_type;
4995 ret = true;
4996 } else if (property == adev->mode_info.underscan_hborder_property) {
4997 dm_new_state->underscan_hborder = val;
4998 ret = true;
4999 } else if (property == adev->mode_info.underscan_vborder_property) {
5000 dm_new_state->underscan_vborder = val;
5001 ret = true;
5002 } else if (property == adev->mode_info.underscan_property) {
5003 dm_new_state->underscan_enable = val;
5004 ret = true;
5005 } else if (property == adev->mode_info.abm_level_property) {
5006 dm_new_state->abm_level = val;
5007 ret = true;
5008 }
5010 return ret;
5011 }
5013 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5014 const struct drm_connector_state *state,
5015 struct drm_property *property,
5018 struct drm_device *dev = connector->dev;
5019 struct amdgpu_device *adev = drm_to_adev(dev);
5020 struct dm_connector_state *dm_state =
5021 to_dm_connector_state(state);
5022 int ret = -EINVAL;
5024 if (property == dev->mode_config.scaling_mode_property) {
5025 switch (dm_state->scaling) {
5026 case RMX_CENTER:
5027 *val = DRM_MODE_SCALE_CENTER;
5028 break;
5029 case RMX_ASPECT:
5030 *val = DRM_MODE_SCALE_ASPECT;
5031 break;
5032 case RMX_FULL:
5033 *val = DRM_MODE_SCALE_FULLSCREEN;
5034 break;
5035 case RMX_OFF:
5036 default:
5037 *val = DRM_MODE_SCALE_NONE;
5038 break;
5039 }
5040 ret = 0;
5041 } else if (property == adev->mode_info.underscan_hborder_property) {
5042 *val = dm_state->underscan_hborder;
5043 ret = 0;
5044 } else if (property == adev->mode_info.underscan_vborder_property) {
5045 *val = dm_state->underscan_vborder;
5046 ret = 0;
5047 } else if (property == adev->mode_info.underscan_property) {
5048 *val = dm_state->underscan_enable;
5049 ret = 0;
5050 } else if (property == adev->mode_info.abm_level_property) {
5051 *val = dm_state->abm_level;
5052 ret = 0;
5053 }
5055 return ret;
5056 }
5058 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5060 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5062 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5065 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5067 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5068 const struct dc_link *link = aconnector->dc_link;
5069 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5070 struct amdgpu_display_manager *dm = &adev->dm;
5072 /*
5073 * Call only if mst_mgr was initialized before, since it's not done
5074 * for all connector types.
5075 */
5076 if (aconnector->mst_mgr.dev)
5077 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5079 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5080 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5082 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5083 link->type != dc_connection_none &&
5084 dm->backlight_dev) {
5085 backlight_device_unregister(dm->backlight_dev);
5086 dm->backlight_dev = NULL;
5090 if (aconnector->dc_em_sink)
5091 dc_sink_release(aconnector->dc_em_sink);
5092 aconnector->dc_em_sink = NULL;
5093 if (aconnector->dc_sink)
5094 dc_sink_release(aconnector->dc_sink);
5095 aconnector->dc_sink = NULL;
5097 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5098 drm_connector_unregister(connector);
5099 drm_connector_cleanup(connector);
5100 if (aconnector->i2c) {
5101 i2c_del_adapter(&aconnector->i2c->base);
5102 kfree(aconnector->i2c);
5104 kfree(aconnector->dm_dp_aux.aux.name);
5109 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5111 struct dm_connector_state *state =
5112 to_dm_connector_state(connector->state);
5114 if (connector->state)
5115 __drm_atomic_helper_connector_destroy_state(connector->state);
5117 kfree(state);
5119 state = kzalloc(sizeof(*state), GFP_KERNEL);
5121 if (state) {
5122 state->scaling = RMX_OFF;
5123 state->underscan_enable = false;
5124 state->underscan_hborder = 0;
5125 state->underscan_vborder = 0;
5126 state->base.max_requested_bpc = 8;
5127 state->vcpi_slots = 0;
5129 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5130 state->abm_level = amdgpu_dm_abm_level;
5132 __drm_atomic_helper_connector_reset(connector, &state->base);
5133 }
5134 }
5136 struct drm_connector_state *
5137 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5139 struct dm_connector_state *state =
5140 to_dm_connector_state(connector->state);
5142 struct dm_connector_state *new_state =
5143 kmemdup(state, sizeof(*state), GFP_KERNEL);
5145 if (!new_state)
5146 return NULL;
5148 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5150 new_state->freesync_capable = state->freesync_capable;
5151 new_state->abm_level = state->abm_level;
5152 new_state->scaling = state->scaling;
5153 new_state->underscan_enable = state->underscan_enable;
5154 new_state->underscan_hborder = state->underscan_hborder;
5155 new_state->underscan_vborder = state->underscan_vborder;
5156 new_state->vcpi_slots = state->vcpi_slots;
5157 new_state->pbn = state->pbn;
5158 return &new_state->base;
5159 }
5161 static int
5162 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5163 {
5164 struct amdgpu_dm_connector *amdgpu_dm_connector =
5165 to_amdgpu_dm_connector(connector);
5166 int r;
5168 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5169 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5170 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5171 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5172 if (r)
5173 return r;
5174 }
5176 #if defined(CONFIG_DEBUG_FS)
5177 connector_debugfs_init(amdgpu_dm_connector);
5178 #endif
5180 return 0;
5181 }
5183 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5184 .reset = amdgpu_dm_connector_funcs_reset,
5185 .detect = amdgpu_dm_connector_detect,
5186 .fill_modes = drm_helper_probe_single_connector_modes,
5187 .destroy = amdgpu_dm_connector_destroy,
5188 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5189 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5190 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5191 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5192 .late_register = amdgpu_dm_connector_late_register,
5193 .early_unregister = amdgpu_dm_connector_unregister
5194 };
5196 static int get_modes(struct drm_connector *connector)
5198 return amdgpu_dm_connector_get_modes(connector);
5201 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5203 struct dc_sink_init_data init_params = {
5204 .link = aconnector->dc_link,
5205 .sink_signal = SIGNAL_TYPE_VIRTUAL
5206 };
5207 struct edid *edid;
5209 if (!aconnector->base.edid_blob_ptr) {
5210 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5211 aconnector->base.name);
5213 aconnector->base.force = DRM_FORCE_OFF;
5214 aconnector->base.override_edid = false;
5215 return;
5216 }
5218 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5220 aconnector->edid = edid;
5222 aconnector->dc_em_sink = dc_link_add_remote_sink(
5223 aconnector->dc_link,
5224 (uint8_t *)edid,
5225 (edid->extensions + 1) * EDID_LENGTH,
5226 &init_params);
5228 if (aconnector->base.force == DRM_FORCE_ON) {
5229 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5230 aconnector->dc_link->local_sink :
5231 aconnector->dc_em_sink;
5232 dc_sink_retain(aconnector->dc_sink);
5233 }
5234 }
5236 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5238 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5240 /*
5241 * In case of headless boot with force on for DP managed connector,
5242 * those settings have to be != 0 to get an initial modeset.
5243 */
5244 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5245 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5246 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5247 }
5250 aconnector->base.override_edid = true;
5251 create_eml_sink(aconnector);
5254 static struct dc_stream_state *
5255 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5256 const struct drm_display_mode *drm_mode,
5257 const struct dm_connector_state *dm_state,
5258 const struct dc_stream_state *old_stream)
5260 struct drm_connector *connector = &aconnector->base;
5261 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5262 struct dc_stream_state *stream;
5263 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5264 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5265 enum dc_status dc_result = DC_OK;
5267 do {
5268 stream = create_stream_for_sink(aconnector, drm_mode,
5269 dm_state, old_stream,
5270 requested_bpc);
5271 if (stream == NULL) {
5272 DRM_ERROR("Failed to create stream for sink!\n");
5273 break;
5274 }
5276 dc_result = dc_validate_stream(adev->dm.dc, stream);
5278 if (dc_result != DC_OK) {
5279 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5280 drm_mode->hdisplay,
5281 drm_mode->vdisplay,
5282 drm_mode->clock,
5283 dc_result,
5284 dc_status_to_str(dc_result));
5286 dc_stream_release(stream);
5287 stream = NULL;
5288 requested_bpc -= 2; /* lower bpc to retry validation */
5289 }
5291 } while (stream == NULL && requested_bpc >= 6);
5293 return stream;
5294 }
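5295 /* Note: the retry loop above lowers bpc in steps of 2, down to 6 bpc, before giving up on a mode. */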
5296 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5297 struct drm_display_mode *mode)
5299 int result = MODE_ERROR;
5300 struct dc_sink *dc_sink;
5301 /* TODO: Unhardcode stream count */
5302 struct dc_stream_state *stream;
5303 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5305 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5306 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5307 return result;
5309 /*
5310 * Only run this the first time mode_valid is called, to initialize
5311 * EDID management.
5312 */
5313 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5314 !aconnector->dc_em_sink)
5315 handle_edid_mgmt(aconnector);
5317 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5319 if (dc_sink == NULL) {
5320 DRM_ERROR("dc_sink is NULL!\n");
5324 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5326 dc_stream_release(stream);
5331 /* TODO: error handling*/
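5334 /* Below: pack DRM HDR static metadata into a DC info packet (HDMI InfoFrame or DP SDP framing). */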
5335 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5336 struct dc_info_packet *out)
5338 struct hdmi_drm_infoframe frame;
5339 unsigned char buf[30]; /* 26 + 4 */
5340 ssize_t len;
5341 int ret, i;
5343 memset(out, 0, sizeof(*out));
5345 if (!state->hdr_output_metadata)
5346 return 0;
5348 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5349 if (ret)
5350 return ret;
5352 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5353 if (len < 0)
5354 return (int)len;
5356 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5357 if (len != 30)
5358 return -EINVAL;
5360 /* Prepare the infopacket for DC. */
5361 switch (state->connector->connector_type) {
5362 case DRM_MODE_CONNECTOR_HDMIA:
5363 out->hb0 = 0x87; /* type */
5364 out->hb1 = 0x01; /* version */
5365 out->hb2 = 0x1A; /* length */
5366 out->sb[0] = buf[3]; /* checksum */
5367 i = 1;
5368 break;
5370 case DRM_MODE_CONNECTOR_DisplayPort:
5371 case DRM_MODE_CONNECTOR_eDP:
5372 out->hb0 = 0x00; /* sdp id, zero */
5373 out->hb1 = 0x87; /* type */
5374 out->hb2 = 0x1D; /* payload len - 1 */
5375 out->hb3 = (0x13 << 2); /* sdp version */
5376 out->sb[0] = 0x01; /* version */
5377 out->sb[1] = 0x1A; /* length */
5378 i = 2;
5379 break;
5381 default:
5382 return -EINVAL;
5383 }
5385 memcpy(&out->sb[i], &buf[4], 26);
5386 out->valid = true;
5388 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5389 sizeof(out->sb), false);
5391 return 0;
5392 }
5394 static bool
5395 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5396 const struct drm_connector_state *new_state)
5398 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5399 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5401 if (old_blob != new_blob) {
5402 if (old_blob && new_blob &&
5403 old_blob->length == new_blob->length)
5404 return memcmp(old_blob->data, new_blob->data,
5405 old_blob->length);
5407 return true;
5408 }
5410 return false;
5411 }
5413 static int
5414 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5415 struct drm_atomic_state *state)
5417 struct drm_connector_state *new_con_state =
5418 drm_atomic_get_new_connector_state(state, conn);
5419 struct drm_connector_state *old_con_state =
5420 drm_atomic_get_old_connector_state(state, conn);
5421 struct drm_crtc *crtc = new_con_state->crtc;
5422 struct drm_crtc_state *new_crtc_state;
5423 int ret;
5425 if (!crtc)
5426 return 0;
5428 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5429 struct dc_info_packet hdr_infopacket;
5431 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5432 if (ret)
5433 return ret;
5435 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5436 if (IS_ERR(new_crtc_state))
5437 return PTR_ERR(new_crtc_state);
5439 /*
5440 * DC considers the stream backends changed if the
5441 * static metadata changes. Forcing the modeset also
5442 * gives a simple way for userspace to switch from
5443 * 8bpc to 10bpc when setting the metadata to enter
5444 * or exit HDR.
5445 *
5446 * Changing the static metadata after it's been
5447 * set is permissible, however. So only force a
5448 * modeset if we're entering or exiting HDR.
5449 */
5450 new_crtc_state->mode_changed =
5451 !old_con_state->hdr_output_metadata ||
5452 !new_con_state->hdr_output_metadata;
5453 }
5455 return 0;
5456 }
5458 static const struct drm_connector_helper_funcs
5459 amdgpu_dm_connector_helper_funcs = {
5460 /*
5461 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5462 * modes will be filtered by drm_mode_validate_size(), and those modes
5463 * are missing after the user starts lightdm. So we need to renew the modes list
5464 * in the get_modes callback, not just return the modes count.
5465 */
5466 .get_modes = get_modes,
5467 .mode_valid = amdgpu_dm_connector_mode_valid,
5468 .atomic_check = amdgpu_dm_connector_atomic_check,
5471 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5475 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5477 struct drm_atomic_state *state = new_crtc_state->state;
5478 struct drm_plane *plane;
5479 int num_active = 0;
5481 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5482 struct drm_plane_state *new_plane_state;
5484 /* Cursor planes are "fake". */
5485 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5488 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5490 if (!new_plane_state) {
5491 /*
5492 * The plane is enabled on the CRTC and hasn't changed
5493 * state. This means that it previously passed
5494 * validation and is therefore enabled.
5495 */
5496 num_active += 1;
5497 continue;
5498 }
5500 /* We need a framebuffer to be considered enabled. */
5501 num_active += (new_plane_state->fb != NULL);
5502 }
5504 return num_active;
5505 }
5507 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5508 struct drm_crtc_state *new_crtc_state)
5510 struct dm_crtc_state *dm_new_crtc_state =
5511 to_dm_crtc_state(new_crtc_state);
5513 dm_new_crtc_state->active_planes = 0;
5515 if (!dm_new_crtc_state->stream)
5516 return;
5518 dm_new_crtc_state->active_planes =
5519 count_crtc_active_planes(new_crtc_state);
5520 }
5522 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5523 struct drm_crtc_state *state)
5525 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5526 struct dc *dc = adev->dm.dc;
5527 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5528 int ret = -EINVAL;
5530 dm_update_crtc_active_planes(crtc, state);
5532 if (unlikely(!dm_crtc_state->stream &&
5533 modeset_required(state, NULL, dm_crtc_state->stream))) {
5534 WARN_ON(1);
5535 return ret;
5536 }
5538 /*
5539 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5540 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5541 * planes are disabled, which is not supported by the hardware. And there is legacy
5542 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5543 */
5544 if (state->enable &&
5545 !(state->plane_mask & drm_plane_mask(crtc->primary)))
5546 return -EINVAL;
5548 /* In some use cases, like reset, no stream is attached */
5549 if (!dm_crtc_state->stream)
5550 return 0;
5552 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5553 return 0;
5555 return ret;
5556 }
5558 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5559 const struct drm_display_mode *mode,
5560 struct drm_display_mode *adjusted_mode)
5561 {
5562 return true;
5563 }
5565 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5566 .disable = dm_crtc_helper_disable,
5567 .atomic_check = dm_crtc_helper_atomic_check,
5568 .mode_fixup = dm_crtc_helper_mode_fixup,
5569 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5572 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5577 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5578 {
5579 switch (display_color_depth) {
5580 case COLOR_DEPTH_666:
5581 return 6;
5582 case COLOR_DEPTH_888:
5583 return 8;
5584 case COLOR_DEPTH_101010:
5585 return 10;
5586 case COLOR_DEPTH_121212:
5587 return 12;
5588 case COLOR_DEPTH_141414:
5589 return 14;
5590 case COLOR_DEPTH_161616:
5591 return 16;
5592 default:
5593 break;
5594 }
5595 return 0;
5596 }
5598 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5599 struct drm_crtc_state *crtc_state,
5600 struct drm_connector_state *conn_state)
5602 struct drm_atomic_state *state = crtc_state->state;
5603 struct drm_connector *connector = conn_state->connector;
5604 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5605 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5606 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5607 struct drm_dp_mst_topology_mgr *mst_mgr;
5608 struct drm_dp_mst_port *mst_port;
5609 enum dc_color_depth color_depth;
5611 bool is_y420 = false;
5613 if (!aconnector->port || !aconnector->dc_sink)
5614 return 0;
5616 mst_port = aconnector->port;
5617 mst_mgr = &aconnector->mst_port->mst_mgr;
5619 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5620 return 0;
5622 if (!state->duplicated) {
5623 int max_bpc = conn_state->max_requested_bpc;
5624 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5625 aconnector->force_yuv420_output;
5626 color_depth = convert_color_depth_from_display_info(connector,
5627 is_y420,
5628 max_bpc);
5629 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5630 clock = adjusted_mode->clock;
5631 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5633 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5634 mst_mgr,
5635 mst_port,
5636 dm_new_connector_state->pbn,
5637 dm_mst_get_pbn_divider(aconnector->dc_link));
5638 if (dm_new_connector_state->vcpi_slots < 0) {
5639 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5640 return dm_new_connector_state->vcpi_slots;
5641 }
5642 }
5643 return 0;
5644 }
5645 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5646 .disable = dm_encoder_helper_disable,
5647 .atomic_check = dm_encoder_helper_atomic_check
5648 };
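5649 /* Below: when DSC is enabled on an MST stream, PBN/VCPI must be recomputed from the DSC-compressed bpp. */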
5650 #if defined(CONFIG_DRM_AMD_DC_DCN)
5651 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5652 struct dc_state *dc_state)
5654 struct dc_stream_state *stream = NULL;
5655 struct drm_connector *connector;
5656 struct drm_connector_state *new_con_state, *old_con_state;
5657 struct amdgpu_dm_connector *aconnector;
5658 struct dm_connector_state *dm_conn_state;
5659 int i, j, clock, bpp;
5660 int vcpi, pbn_div, pbn = 0;
5662 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5664 aconnector = to_amdgpu_dm_connector(connector);
5666 if (!aconnector->port)
5667 continue;
5669 if (!new_con_state || !new_con_state->crtc)
5670 continue;
5672 dm_conn_state = to_dm_connector_state(new_con_state);
5674 for (j = 0; j < dc_state->stream_count; j++) {
5675 stream = dc_state->streams[j];
5676 if (!stream)
5677 continue;
5679 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5680 break;
5682 stream = NULL;
5683 }
5685 if (!stream)
5686 continue;
5688 if (stream->timing.flags.DSC != 1) {
5689 drm_dp_mst_atomic_enable_dsc(state,
5690 aconnector->port,
5691 dm_conn_state->pbn,
5692 0,
5693 false);
5694 continue;
5695 }
5697 pbn_div = dm_mst_get_pbn_divider(stream->link);
5698 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5699 clock = stream->timing.pix_clk_100hz / 10;
5700 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5701 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5702 aconnector->port,
5703 pbn, pbn_div,
5704 true);
5705 if (vcpi < 0)
5706 return vcpi;
5708 dm_conn_state->pbn = pbn;
5709 dm_conn_state->vcpi_slots = vcpi;
5710 }
5711 return 0;
5712 }
5713 #endif
5715 static void dm_drm_plane_reset(struct drm_plane *plane)
5717 struct dm_plane_state *amdgpu_state = NULL;
5719 if (plane->state)
5720 plane->funcs->atomic_destroy_state(plane, plane->state);
5722 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5723 WARN_ON(amdgpu_state == NULL);
5725 if (amdgpu_state)
5726 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5727 }
5729 static struct drm_plane_state *
5730 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5732 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5734 old_dm_plane_state = to_dm_plane_state(plane->state);
5735 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5736 if (!dm_plane_state)
5737 return NULL;
5739 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5741 if (old_dm_plane_state->dc_state) {
5742 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5743 dc_plane_state_retain(dm_plane_state->dc_state);
5746 /* Framebuffer hasn't been updated yet, so retain old flags. */
5747 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5748 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5750 return &dm_plane_state->base;
5753 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5754 struct drm_plane_state *state)
5756 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5758 if (dm_plane_state->dc_state)
5759 dc_plane_state_release(dm_plane_state->dc_state);
5761 drm_atomic_helper_plane_destroy_state(plane, state);
5764 static const struct drm_plane_funcs dm_plane_funcs = {
5765 .update_plane = drm_atomic_helper_update_plane,
5766 .disable_plane = drm_atomic_helper_disable_plane,
5767 .destroy = drm_primary_helper_destroy,
5768 .reset = dm_drm_plane_reset,
5769 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5770 .atomic_destroy_state = dm_drm_plane_destroy_state,
5771 };
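5772 /* prepare_fb below pins the buffer object and, for newly created plane states, fills the DC buffer attributes. */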
5773 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5774 struct drm_plane_state *new_state)
5776 struct amdgpu_framebuffer *afb;
5777 struct drm_gem_object *obj;
5778 struct amdgpu_device *adev;
5779 struct amdgpu_bo *rbo;
5780 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5781 struct list_head list;
5782 struct ttm_validate_buffer tv;
5783 struct ww_acquire_ctx ticket;
5784 uint32_t domain;
5785 int r;
5787 if (!new_state->fb) {
5788 DRM_DEBUG_DRIVER("No FB bound\n");
5789 return 0;
5790 }
5792 afb = to_amdgpu_framebuffer(new_state->fb);
5793 obj = new_state->fb->obj[0];
5794 rbo = gem_to_amdgpu_bo(obj);
5795 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5796 INIT_LIST_HEAD(&list);
5798 tv.bo = &rbo->tbo;
5799 tv.num_shared = 1;
5800 list_add(&tv.head, &list);
5802 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5803 if (r) {
5804 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5805 return r;
5806 }
5808 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5809 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5811 domain = AMDGPU_GEM_DOMAIN_VRAM;
5813 r = amdgpu_bo_pin(rbo, domain);
5814 if (unlikely(r != 0)) {
5815 if (r != -ERESTARTSYS)
5816 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5817 ttm_eu_backoff_reservation(&ticket, &list);
5818 return r;
5819 }
5821 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5822 if (unlikely(r != 0)) {
5823 amdgpu_bo_unpin(rbo);
5824 ttm_eu_backoff_reservation(&ticket, &list);
5825 DRM_ERROR("%p bind failed\n", rbo);
5826 return r;
5827 }
5829 ttm_eu_backoff_reservation(&ticket, &list);
5831 afb->address = amdgpu_bo_gpu_offset(rbo);
5836 * We don't do surface updates on planes that have been newly created,
5837 * but we also don't have the afb->address during atomic check.
5839 * Fill in buffer attributes depending on the address here, but only on
5840 * newly created planes since they're not being used by DC yet and this
5841 * won't modify global state.
5843 dm_plane_state_old = to_dm_plane_state(plane->state);
5844 dm_plane_state_new = to_dm_plane_state(new_state);
5846 if (dm_plane_state_new->dc_state &&
5847 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5848 struct dc_plane_state *plane_state =
5849 dm_plane_state_new->dc_state;
5850 bool force_disable_dcc = !plane_state->dcc.enable;
5852 fill_plane_buffer_attributes(
5853 adev, afb, plane_state->format, plane_state->rotation,
5854 dm_plane_state_new->tiling_flags,
5855 &plane_state->tiling_info, &plane_state->plane_size,
5856 &plane_state->dcc, &plane_state->address,
5857 dm_plane_state_new->tmz_surface, force_disable_dcc);
5858 }
5860 return 0;
5861 }
5863 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5864 struct drm_plane_state *old_state)
5866 struct amdgpu_bo *rbo;
5867 int r;
5869 if (!old_state->fb)
5870 return;
5872 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5873 r = amdgpu_bo_reserve(rbo, false);
5874 if (unlikely(r)) {
5875 DRM_ERROR("failed to reserve rbo before unpin\n");
5876 return;
5877 }
5879 amdgpu_bo_unpin(rbo);
5880 amdgpu_bo_unreserve(rbo);
5881 amdgpu_bo_unref(&rbo);
5884 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5885 struct drm_crtc_state *new_crtc_state)
5887 int max_downscale = 0;
5888 int max_upscale = INT_MAX;
5890 /* TODO: These should be checked against DC plane caps */
5891 return drm_atomic_helper_check_plane_state(
5892 state, new_crtc_state, max_downscale, max_upscale, true, true);
5895 static int dm_plane_atomic_check(struct drm_plane *plane,
5896 struct drm_plane_state *state)
5898 struct amdgpu_device *adev = drm_to_adev(plane->dev);
5899 struct dc *dc = adev->dm.dc;
5900 struct dm_plane_state *dm_plane_state;
5901 struct dc_scaling_info scaling_info;
5902 struct drm_crtc_state *new_crtc_state;
5905 dm_plane_state = to_dm_plane_state(state);
5907 if (!dm_plane_state->dc_state)
5908 return 0;
5910 new_crtc_state =
5911 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5912 if (!new_crtc_state)
5913 return -EINVAL;
5915 ret = dm_plane_helper_check_state(state, new_crtc_state);
5916 if (ret)
5917 return ret;
5919 ret = fill_dc_scaling_info(state, &scaling_info);
5920 if (ret)
5921 return ret;
5923 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5924 return 0;
5926 return -EINVAL;
5927 }
5929 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5930 struct drm_plane_state *new_plane_state)
5932 /* Only support async updates on cursor planes. */
5933 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5934 return -EINVAL;
5936 return 0;
5937 }
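5938 /* Async plane updates skip a full atomic commit; only the cursor's FB swap and position copy happen below. */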
5939 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5940 struct drm_plane_state *new_state)
5942 struct drm_plane_state *old_state =
5943 drm_atomic_get_old_plane_state(new_state->state, plane);
5945 swap(plane->state->fb, new_state->fb);
5947 plane->state->src_x = new_state->src_x;
5948 plane->state->src_y = new_state->src_y;
5949 plane->state->src_w = new_state->src_w;
5950 plane->state->src_h = new_state->src_h;
5951 plane->state->crtc_x = new_state->crtc_x;
5952 plane->state->crtc_y = new_state->crtc_y;
5953 plane->state->crtc_w = new_state->crtc_w;
5954 plane->state->crtc_h = new_state->crtc_h;
5956 handle_cursor_update(plane, old_state);
5957 }
5959 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5960 .prepare_fb = dm_plane_helper_prepare_fb,
5961 .cleanup_fb = dm_plane_helper_cleanup_fb,
5962 .atomic_check = dm_plane_atomic_check,
5963 .atomic_async_check = dm_plane_atomic_async_check,
5964 .atomic_async_update = dm_plane_atomic_async_update
5965 };
5967 /*
5968 * TODO: these are currently initialized to RGB formats only.
5969 * For future use cases we should either initialize them dynamically based on
5970 * plane capabilities, or initialize this array to all formats, so the internal DRM
5971 * check will succeed, and let DC implement the proper check.
5972 */
5973 static const uint32_t rgb_formats[] = {
5974 DRM_FORMAT_XRGB8888,
5975 DRM_FORMAT_ARGB8888,
5976 DRM_FORMAT_RGBA8888,
5977 DRM_FORMAT_XRGB2101010,
5978 DRM_FORMAT_XBGR2101010,
5979 DRM_FORMAT_ARGB2101010,
5980 DRM_FORMAT_ABGR2101010,
5981 DRM_FORMAT_XBGR8888,
5982 DRM_FORMAT_ABGR8888,
5986 static const uint32_t overlay_formats[] = {
5987 DRM_FORMAT_XRGB8888,
5988 DRM_FORMAT_ARGB8888,
5989 DRM_FORMAT_RGBA8888,
5990 DRM_FORMAT_XBGR8888,
5991 DRM_FORMAT_ABGR8888,
5995 static const u32 cursor_formats[] = {
5996 DRM_FORMAT_ARGB8888
5997 };
5999 static int get_plane_formats(const struct drm_plane *plane,
6000 const struct dc_plane_cap *plane_cap,
6001 uint32_t *formats, int max_formats)
6003 int i, num_formats = 0;
6005 /*
6006 * TODO: Query support for each group of formats directly from
6007 * DC plane caps. This will require adding more formats to the
6008 * list.
6009 */
6011 switch (plane->type) {
6012 case DRM_PLANE_TYPE_PRIMARY:
6013 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6014 if (num_formats >= max_formats)
6015 break;
6017 formats[num_formats++] = rgb_formats[i];
6018 }
6020 if (plane_cap && plane_cap->pixel_format_support.nv12)
6021 formats[num_formats++] = DRM_FORMAT_NV12;
6022 if (plane_cap && plane_cap->pixel_format_support.p010)
6023 formats[num_formats++] = DRM_FORMAT_P010;
6024 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6025 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6026 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6027 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6028 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6029 }
6030 break;
6032 case DRM_PLANE_TYPE_OVERLAY:
6033 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6034 if (num_formats >= max_formats)
6035 break;
6037 formats[num_formats++] = overlay_formats[i];
6038 }
6039 break;
6041 case DRM_PLANE_TYPE_CURSOR:
6042 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6043 if (num_formats >= max_formats)
6044 break;
6046 formats[num_formats++] = cursor_formats[i];
6047 }
6048 break;
6049 }
6051 return num_formats;
6052 }
6054 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6055 struct drm_plane *plane,
6056 unsigned long possible_crtcs,
6057 const struct dc_plane_cap *plane_cap)
6058 {
6059 uint32_t formats[32];
6060 int res;
6061 int num_formats;
6062 unsigned int supported_rotations;
6064 num_formats = get_plane_formats(plane, plane_cap, formats,
6065 ARRAY_SIZE(formats));
6067 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6068 &dm_plane_funcs, formats, num_formats,
6069 NULL, plane->type, NULL);
6070 if (res)
6071 return res;
6073 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6074 plane_cap && plane_cap->per_pixel_alpha) {
6075 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6076 BIT(DRM_MODE_BLEND_PREMULTI);
6078 drm_plane_create_alpha_property(plane);
6079 drm_plane_create_blend_mode_property(plane, blend_caps);
6082 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6083 plane_cap &&
6084 (plane_cap->pixel_format_support.nv12 ||
6085 plane_cap->pixel_format_support.p010)) {
6086 /* This only affects YUV formats. */
6087 drm_plane_create_color_properties(
6088 plane,
6089 BIT(DRM_COLOR_YCBCR_BT601) |
6090 BIT(DRM_COLOR_YCBCR_BT709) |
6091 BIT(DRM_COLOR_YCBCR_BT2020),
6092 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6093 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6094 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6095 }
6097 supported_rotations =
6098 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6099 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6101 if (dm->adev->asic_type >= CHIP_BONAIRE)
6102 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6103 supported_rotations);
6105 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6107 /* Create (reset) the plane state */
6108 if (plane->funcs->reset)
6109 plane->funcs->reset(plane);
6111 return 0;
6112 }
6114 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6115 struct drm_plane *plane,
6116 uint32_t crtc_index)
6118 struct amdgpu_crtc *acrtc = NULL;
6119 struct drm_plane *cursor_plane;
6121 int res = -ENOMEM;
6123 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6124 if (!cursor_plane)
6125 goto fail;
6127 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6128 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6130 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6131 if (!acrtc)
6132 goto fail;
6134 res = drm_crtc_init_with_planes(
6135 dm->ddev,
6136 &acrtc->base,
6137 plane,
6138 cursor_plane,
6139 &amdgpu_dm_crtc_funcs, NULL);
6141 if (res)
6142 goto fail;
6144 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6146 /* Create (reset) the plane state */
6147 if (acrtc->base.funcs->reset)
6148 acrtc->base.funcs->reset(&acrtc->base);
6150 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6151 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6153 acrtc->crtc_id = crtc_index;
6154 acrtc->base.enabled = false;
6155 acrtc->otg_inst = -1;
6157 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6158 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6159 true, MAX_COLOR_LUT_ENTRIES);
6160 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6162 return 0;
6164 fail:
6165 kfree(acrtc);
6166 kfree(cursor_plane);
6167 return res;
6168 }
6171 static int to_drm_connector_type(enum signal_type st)
6172 {
6173 switch (st) {
6174 case SIGNAL_TYPE_HDMI_TYPE_A:
6175 return DRM_MODE_CONNECTOR_HDMIA;
6176 case SIGNAL_TYPE_EDP:
6177 return DRM_MODE_CONNECTOR_eDP;
6178 case SIGNAL_TYPE_LVDS:
6179 return DRM_MODE_CONNECTOR_LVDS;
6180 case SIGNAL_TYPE_RGB:
6181 return DRM_MODE_CONNECTOR_VGA;
6182 case SIGNAL_TYPE_DISPLAY_PORT:
6183 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6184 return DRM_MODE_CONNECTOR_DisplayPort;
6185 case SIGNAL_TYPE_DVI_DUAL_LINK:
6186 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6187 return DRM_MODE_CONNECTOR_DVID;
6188 case SIGNAL_TYPE_VIRTUAL:
6189 return DRM_MODE_CONNECTOR_VIRTUAL;
6190 default:
6191 return DRM_MODE_CONNECTOR_Unknown;
6192 }
6193 }
6196 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6198 struct drm_encoder *encoder;
6200 /* There is only one encoder per connector */
6201 drm_connector_for_each_possible_encoder(connector, encoder)
6202 return encoder;
6204 return NULL;
6205 }
6207 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6209 struct drm_encoder *encoder;
6210 struct amdgpu_encoder *amdgpu_encoder;
6212 encoder = amdgpu_dm_connector_to_encoder(connector);
6214 if (encoder == NULL)
6215 return;
6217 amdgpu_encoder = to_amdgpu_encoder(encoder);
6219 amdgpu_encoder->native_mode.clock = 0;
6221 if (!list_empty(&connector->probed_modes)) {
6222 struct drm_display_mode *preferred_mode = NULL;
6224 list_for_each_entry(preferred_mode,
6225 &connector->probed_modes,
6227 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6228 amdgpu_encoder->native_mode = *preferred_mode;
6236 static struct drm_display_mode *
6237 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6238 char *name,
6239 int hdisplay, int vdisplay)
6240 {
6241 struct drm_device *dev = encoder->dev;
6242 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6243 struct drm_display_mode *mode = NULL;
6244 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6246 mode = drm_mode_duplicate(dev, native_mode);
6248 if (mode == NULL)
6249 return NULL;
6251 mode->hdisplay = hdisplay;
6252 mode->vdisplay = vdisplay;
6253 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6254 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6256 return mode;
6257 }
6260 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6261 struct drm_connector *connector)
6263 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6264 struct drm_display_mode *mode = NULL;
6265 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6266 struct amdgpu_dm_connector *amdgpu_dm_connector =
6267 to_amdgpu_dm_connector(connector);
6268 int i;
6269 int n;
6270 struct mode_size {
6271 char name[DRM_DISPLAY_MODE_LEN];
6272 int w;
6273 int h;
6274 } common_modes[] = {
6275 { "640x480", 640, 480},
6276 { "800x600", 800, 600},
6277 { "1024x768", 1024, 768},
6278 { "1280x720", 1280, 720},
6279 { "1280x800", 1280, 800},
6280 {"1280x1024", 1280, 1024},
6281 { "1440x900", 1440, 900},
6282 {"1680x1050", 1680, 1050},
6283 {"1600x1200", 1600, 1200},
6284 {"1920x1080", 1920, 1080},
6285 {"1920x1200", 1920, 1200}
6288 n = ARRAY_SIZE(common_modes);
6290 for (i = 0; i < n; i++) {
6291 struct drm_display_mode *curmode = NULL;
6292 bool mode_existed = false;
6294 if (common_modes[i].w > native_mode->hdisplay ||
6295 common_modes[i].h > native_mode->vdisplay ||
6296 (common_modes[i].w == native_mode->hdisplay &&
6297 common_modes[i].h == native_mode->vdisplay))
6300 list_for_each_entry(curmode, &connector->probed_modes, head) {
6301 if (common_modes[i].w == curmode->hdisplay &&
6302 common_modes[i].h == curmode->vdisplay) {
6303 mode_existed = true;
6304 break;
6305 }
6306 }
6308 if (mode_existed)
6309 continue;
6311 mode = amdgpu_dm_create_common_mode(encoder,
6312 common_modes[i].name, common_modes[i].w,
6313 common_modes[i].h);
6314 drm_mode_probed_add(connector, mode);
6315 amdgpu_dm_connector->num_modes++;
6316 }
6317 }
6319 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6320 struct edid *edid)
6321 {
6322 struct amdgpu_dm_connector *amdgpu_dm_connector =
6323 to_amdgpu_dm_connector(connector);
6325 if (edid) {
6326 /* empty probed_modes */
6327 INIT_LIST_HEAD(&connector->probed_modes);
6328 amdgpu_dm_connector->num_modes =
6329 drm_add_edid_modes(connector, edid);
6331 /* Sort the probed modes before calling
6332 * amdgpu_dm_get_native_mode(), since the EDID can have
6333 * more than one preferred mode. Modes that appear
6334 * later in the probed mode list could be of higher
6335 * and preferred resolution. For example, 3840x2160
6336 * in the base EDID preferred timing and 4096x2160
6337 * preferred resolution in a DID extension block later.
6338 */
6339 drm_mode_sort(&connector->probed_modes);
6340 amdgpu_dm_get_native_mode(connector);
6341 } else {
6342 amdgpu_dm_connector->num_modes = 0;
6343 }
6344 }
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
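/*
 * DDC/I2C path: translate an array of i2c_msg requests from the I2C core
 * into a single DC i2c_command and submit it through dc_submit_i2c().
 * The transfer is all-or-nothing: on success the number of processed
 * messages is returned, otherwise -EIO.
 */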
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}

	return res;
}
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and forcibly reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}
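/*
 * Decide whether hdcp_update_display() needs to run for this connector.
 * The requested content_protection value is also massaged here for the
 * re-enable and S3-resume cases, which must not be treated as new
 * userspace requests.
 */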
#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check that something is actually connected and enabled; otherwise
	 * we would start HDCP with nothing connected or enabled (hot-plug,
	 * headless S3, DPMS).
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}
#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
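/*
 * Program or disable the hardware cursor on the CRTC backing @plane.
 * get_cursor_position() has already folded negative coordinates into the
 * hotspot, so this only forwards the position and attributes to DC while
 * holding dc_lock.
 */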
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
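/*
 * Recompute the VRR state for a CRTC and mirror it into dm_irq_params,
 * so the vblank/vupdate interrupt handlers can read a consistent copy
 * without reaching into the atomic state; event_lock guards the copy.
 */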
static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}
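/*
 * Per-CRTC commit helper: gather surface, scaling and flip-address
 * updates for every plane on @pcrtc into one bundle, wait on the
 * framebuffer fences, throttle against the target vblank, then apply
 * the whole bundle through dc_commit_updates_for_stream(). PSR and the
 * pageflip IRQ state are adjusted afterwards to match the new plane
 * configuration.
 */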
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			dm_new_plane_state->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			dm_new_plane_state->tmz_surface, false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;
		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
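/*
 * Walk the connector states twice: first notify the audio component
 * about connectors that lost their CRTC, then about connectors that
 * gained one, using the audio instance DC assigned to the new stream.
 */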
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	/*
	 * Add check here for SoC's that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/*TODO Handle EINTR, reenable IRQ*/
}
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
				       new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}

	drm_atomic_helper_calc_timestamping_constants(state);

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected (dc_sink is NULL on the
				 * aconnector), and we expect a reset-mode call
				 * to come soon.
				 *
				 * This can also happen when an unplug is done
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		/* if there was a mode set or reset, disable eDP PSR */
		if (mode_set_reset_required)
			amdgpu_dm_psr_disable_all(dm);

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													 : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
			/**
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
				amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state,
					dm_new_crtc_state->crc_src);
			}
#endif
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}
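/*
 * Build and commit a minimal atomic state (connector, CRTC and primary
 * plane) that re-applies the current mode, for cases where no modeset
 * request is expected from userspace.
 */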
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state*/
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}
/*
 * This function handles all cases when a set mode does not come upon
 * hotplug. This includes when a display is unplugged then plugged back
 * into the same port, and when running without usermode desktop manager
 * support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx ensures that, when the
	 * framework releases it, the extra locks we are taking here will
	 * also get released.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming has completed and all
		 * page flips are done.
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}
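/*
 * Derive the FreeSync/VRR configuration for a CRTC from its connector
 * state: VRR is supported only when the connector reported FreeSync
 * capability and the mode's nominal refresh rate falls inside the
 * monitor's [min_vfreq, max_vfreq] range; the range is handed to the
 * freesync module in uHz.
 */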
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}
static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * We can have no stream on ACTION_SET if a display was
		 * disconnected during S3; in this case it is not an error,
		 * the OS will be updated after detection and will do the
		 * right thing on the next atomic commit.
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when MST
		 * connectors are added but not found in the existing
		 * crtc_state in the chained mode.
		 * TODO: need to dig out the root cause of that.
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
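/*
 * Whether a plane update requires DC to tear down and recreate all the
 * plane states on its stream. This is the conservative answer: modesets
 * and any change that can affect z-order, pipe acquisition or bandwidth
 * (scaling, rotation, blending, alpha, colorspace, format, tiling/DCC)
 * force a reset.
 */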
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficiently
	 * robust to determine when we need to reset all the planes on
	 * the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_dm_plane_state = to_dm_plane_state(old_other_state);
		new_dm_plane_state = to_dm_plane_state(new_other_state);

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_dm_plane_state->tiling_flags !=
		    new_dm_plane_state->tiling_flags)
			return true;
	}

	return false;
}
8371 static int dm_update_plane_state(struct dc *dc,
8372 struct drm_atomic_state *state,
8373 struct drm_plane *plane,
8374 struct drm_plane_state *old_plane_state,
8375 struct drm_plane_state *new_plane_state,
8377 bool *lock_and_validation_needed)
8380 struct dm_atomic_state *dm_state = NULL;
8381 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8382 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8383 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8384 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8385 struct amdgpu_crtc *new_acrtc;
8390 new_plane_crtc = new_plane_state->crtc;
8391 old_plane_crtc = old_plane_state->crtc;
8392 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8393 dm_old_plane_state = to_dm_plane_state(old_plane_state);
/* TODO: Implement a better atomic check for the cursor plane */
8396 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8397 if (!enable || !new_plane_crtc ||
8398 drm_atomic_plane_disabling(plane->state, new_plane_state))
8401 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8403 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8404 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8405 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8406 new_plane_state->crtc_w, new_plane_state->crtc_h);
8413 needs_reset = should_reset_plane(state, plane, old_plane_state,
8416 /* Remove any changed/removed planes */
8421 if (!old_plane_crtc)
8424 old_crtc_state = drm_atomic_get_old_crtc_state(
8425 state, old_plane_crtc);
8426 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8428 if (!dm_old_crtc_state->stream)
8431 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8432 plane->base.id, old_plane_crtc->base.id);
8434 ret = dm_atomic_get_state(state, &dm_state);
8438 if (!dc_remove_plane_from_context(
8440 dm_old_crtc_state->stream,
8441 dm_old_plane_state->dc_state,
8442 dm_state->context)) {
8448 dc_plane_state_release(dm_old_plane_state->dc_state);
8449 dm_new_plane_state->dc_state = NULL;
8451 *lock_and_validation_needed = true;
8453 } else { /* Add new planes */
8454 struct dc_plane_state *dc_new_plane_state;
8456 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8459 if (!new_plane_crtc)
8462 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8463 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8465 if (!dm_new_crtc_state->stream)
8471 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8475 WARN_ON(dm_new_plane_state->dc_state);
8477 dc_new_plane_state = dc_create_plane_state(dc);
8478 if (!dc_new_plane_state)
8481 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8482 plane->base.id, new_plane_crtc->base.id);
8484 ret = fill_dc_plane_attributes(
8485 drm_to_adev(new_plane_crtc->dev),
8490 dc_plane_state_release(dc_new_plane_state);
8494 ret = dm_atomic_get_state(state, &dm_state);
8496 dc_plane_state_release(dc_new_plane_state);
8501 * Any atomic check errors that occur after this will
8502 * not need a release. The plane state will be attached
8503 * to the stream, and therefore part of the atomic
 * state. It'll be released when the atomic state is
 * cleaned.
8507 if (!dc_add_plane_to_context(
8509 dm_new_crtc_state->stream,
8511 dm_state->context)) {
8513 dc_plane_state_release(dc_new_plane_state);
8517 dm_new_plane_state->dc_state = dc_new_plane_state;
8519 /* Tell DC to do a full surface update every time there
8520 * is a plane change. Inefficient, but works for now.
8522 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8524 *lock_and_validation_needed = true;
8531 #if defined(CONFIG_DRM_AMD_DC_DCN)
8532 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8534 struct drm_connector *connector;
8535 struct drm_connector_state *conn_state;
8536 struct amdgpu_dm_connector *aconnector = NULL;
8538 for_each_new_connector_in_state(state, connector, conn_state, i) {
8539 if (conn_state->crtc != crtc)
8542 aconnector = to_amdgpu_dm_connector(connector);
8543 if (!aconnector->port || !aconnector->mst_port)
8552 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8557 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8558 * @dev: The DRM device
8559 * @state: The atomic state to commit
8561 * Validate that the given atomic state is programmable by DC into hardware.
8562 * This involves constructing a &struct dc_state reflecting the new hardware
8563 * state we wish to commit, then querying DC to see if it is programmable. It's
8564 * important not to modify the existing DC state. Otherwise, atomic_check
8565 * may unexpectedly commit hardware changes.
 * When validating the DC state, it's important that the right locks are
 * acquired. In the full-update case, which removes/adds/updates streams on
 * one CRTC while flipping on another CRTC, acquiring the global lock
 * guarantees that any such full-update commit will wait for completion of
 * any outstanding flip using DRM's synchronization events.
 * Note that DM adds the affected connectors for all CRTCs in state, even
 * when that might not seem necessary. This is because DC stream creation
 * requires the DC sink, which is tied to the DRM connector state. Cleaning
 * this up should be possible, but it is non-trivial - a possible TODO item.
 * Return: Zero on success, or a negative error code if validation failed.
8580 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8581 struct drm_atomic_state *state)
8583 struct amdgpu_device *adev = drm_to_adev(dev);
8584 struct dm_atomic_state *dm_state = NULL;
8585 struct dc *dc = adev->dm.dc;
8586 struct drm_connector *connector;
8587 struct drm_connector_state *old_con_state, *new_con_state;
8588 struct drm_crtc *crtc;
8589 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8590 struct drm_plane *plane;
8591 struct drm_plane_state *old_plane_state, *new_plane_state;
8592 enum dc_status status;
8594 bool lock_and_validation_needed = false;
8596 amdgpu_check_debugfs_connector_property_change(adev, state);
8598 ret = drm_atomic_helper_check_modeset(dev, state);
8602 /* Check connector changes */
8603 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8604 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8605 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8607 /* Skip connectors that are disabled or part of modeset already. */
8608 if (!old_con_state->crtc && !new_con_state->crtc)
8611 if (!new_con_state->crtc)
8614 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8615 if (IS_ERR(new_crtc_state)) {
8616 ret = PTR_ERR(new_crtc_state);
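/*
 * Note: an abm_level change on its own does not set any DRM change
 * flags, so mark the connectors as changed here to force this CRTC
 * through the validation path below.
 */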
8620 if (dm_old_con_state->abm_level !=
8621 dm_new_con_state->abm_level)
8622 new_crtc_state->connectors_changed = true;
8625 #if defined(CONFIG_DRM_AMD_DC_DCN)
8626 if (adev->asic_type >= CHIP_NAVI10) {
8627 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8628 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8629 ret = add_affected_mst_dsc_crtcs(state, crtc);
8636 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8637 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8638 !new_crtc_state->color_mgmt_changed &&
8639 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8642 if (!new_crtc_state->enable)
8645 ret = drm_atomic_add_affected_connectors(state, crtc);
8649 ret = drm_atomic_add_affected_planes(state, crtc);
8655 * Add all primary and overlay planes on the CRTC to the state
8656 * whenever a plane is enabled to maintain correct z-ordering
8657 * and to enable fast surface updates.
8659 drm_for_each_crtc(crtc, dev) {
8660 bool modified = false;
8662 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8663 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8666 if (new_plane_state->crtc == crtc ||
8667 old_plane_state->crtc == crtc) {
8676 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8677 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8681 drm_atomic_get_plane_state(state, plane);
8683 if (IS_ERR(new_plane_state)) {
8684 ret = PTR_ERR(new_plane_state);
8690 /* Prepass for updating tiling flags on new planes. */
8691 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8692 struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8693 struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8695 ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8696 &new_dm_plane_state->tmz_surface);
/* Remove existing planes if they are modified */
8702 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8703 ret = dm_update_plane_state(dc, state, plane,
8707 &lock_and_validation_needed);
/* Disable all CRTCs that require disabling */
8713 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8714 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8718 &lock_and_validation_needed);
/* Enable all CRTCs that require enabling */
8724 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8725 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8729 &lock_and_validation_needed);
8734 /* Add new/modified planes */
8735 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8736 ret = dm_update_plane_state(dc, state, plane,
8740 &lock_and_validation_needed);
8745 /* Run this here since we want to validate the streams we created */
8746 ret = drm_atomic_helper_check_planes(dev, state);
8750 if (state->legacy_cursor_update) {
 * This is a fast cursor update coming from the plane update
 * helper; check if it can be done asynchronously for better
 * performance.
8756 state->async_update =
8757 !drm_atomic_helper_async_check(dev, state);
8760 * Skip the remaining global validation if this is an async
8761 * update. Cursor updates can be done without affecting
8762 * state or bandwidth calcs and this avoids the performance
8763 * penalty of locking the private state object and
8764 * allocating a new dc_state.
8766 if (state->async_update)
/* Check scaling and underscan changes */
/*
 * TODO: Removed scaling-changes validation due to the inability to commit
 * a new stream into the context w/o causing a full reset. Need to
 * decide how to handle this.
 */
8775 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8776 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8777 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8778 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8780 /* Skip any modesets/resets */
8781 if (!acrtc || drm_atomic_crtc_needs_modeset(
8782 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
/* Skip anything that is not a scaling or underscan change */
8786 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8789 lock_and_validation_needed = true;
 * Streams and planes are reset when there are changes that affect
 * bandwidth. Anything that affects bandwidth needs to go through
 * DC global validation to ensure that the configuration can be applied
 * to hardware.
 *
 * We currently have to stall out here in atomic_check for outstanding
 * commits to finish in this case because our IRQ handlers reference
 * DRM state directly - we can end up disabling interrupts too early
 * if we don't.
 *
 * TODO: Remove this stall and drop DM state private objects.
8805 if (lock_and_validation_needed) {
8806 ret = dm_atomic_get_state(state, &dm_state);
8810 ret = do_aquire_global_lock(dev, state);
8814 #if defined(CONFIG_DRM_AMD_DC_DCN)
8815 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8818 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
 * Perform validation of the MST topology in the state:
 * we need to perform the MST atomic check before calling
 * dc_validate_global_state(), or we risk getting stuck in
 * an infinite loop and eventually hanging.
8829 ret = drm_dp_mst_atomic_check(state);
8832 status = dc_validate_global_state(dc, dm_state->context, false);
8833 if (status != DC_OK) {
8834 DC_LOG_WARNING("DC global validation failure: %s (%d)",
8835 dc_status_to_str(status), status);
 * The commit is a fast update. Fast updates shouldn't change
 * the DC context or affect global validation, and they can have
 * their commit work done in parallel with other commits not
 * touching the same resource. If we have a new DC context as
 * part of the DM atomic state from validation we need to free
 * it and retain the existing one instead.
8848 * Furthermore, since the DM atomic state only contains the DC
8849 * context and can safely be annulled, we can free the state
8850 * and clear the associated private object now to free
8851 * some memory and avoid a possible use-after-free later.
8854 for (i = 0; i < state->num_private_objs; i++) {
8855 struct drm_private_obj *obj = state->private_objs[i].ptr;
8857 if (obj->funcs == adev->dm.atomic_obj.funcs) {
8858 int j = state->num_private_objs-1;
8860 dm_atomic_destroy_state(obj,
8861 state->private_objs[i].state);
8863 /* If i is not at the end of the array then the
8864 * last element needs to be moved to where i was
8865 * before the array can safely be truncated.
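 * (E.g. with num_private_objs == 4 and i == 1: slot 1 takes the
 * contents of slot 3, slot 3 is cleared, and the count drops to 3.)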
8868 state->private_objs[i] =
8869 state->private_objs[j];
8871 state->private_objs[j].ptr = NULL;
8872 state->private_objs[j].state = NULL;
8873 state->private_objs[j].old_state = NULL;
8874 state->private_objs[j].new_state = NULL;
8876 state->num_private_objs = j;
8882 /* Store the overall update type for use later in atomic check. */
8883 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8884 struct dm_crtc_state *dm_new_crtc_state =
8885 to_dm_crtc_state(new_crtc_state);
8887 dm_new_crtc_state->update_type = lock_and_validation_needed ?
8892 /* Must be success */
8897 if (ret == -EDEADLK)
8898 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8899 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8900 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8902 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8907 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8908 struct amdgpu_dm_connector *amdgpu_dm_connector)
8911 bool capable = false;
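	/*
	 * Per the DisplayPort spec, the MSA_TIMING_PAR_IGNORED bit in the
	 * DP_DOWN_STREAM_PORT_COUNT register indicates that the sink can
	 * ignore the MSA video timing parameters - a prerequisite for
	 * varying the vertical blanking interval, i.e. FreeSync/VRR over DP.
	 */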
8913 if (amdgpu_dm_connector->dc_link &&
8914 dm_helpers_dp_read_dpcd(
8916 amdgpu_dm_connector->dc_link,
8917 DP_DOWN_STREAM_PORT_COUNT,
8919 sizeof(dpcd_data))) {
capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8925 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8929 bool edid_check_required;
8930 struct detailed_timing *timing;
8931 struct detailed_non_pixel *data;
8932 struct detailed_data_monitor_range *range;
8933 struct amdgpu_dm_connector *amdgpu_dm_connector =
8934 to_amdgpu_dm_connector(connector);
8935 struct dm_connector_state *dm_con_state = NULL;
8937 struct drm_device *dev = connector->dev;
8938 struct amdgpu_device *adev = drm_to_adev(dev);
8939 bool freesync_capable = false;
8941 if (!connector->state) {
8942 DRM_ERROR("%s - Connector has no state", __func__);
8947 dm_con_state = to_dm_connector_state(connector->state);
8949 amdgpu_dm_connector->min_vfreq = 0;
8950 amdgpu_dm_connector->max_vfreq = 0;
8951 amdgpu_dm_connector->pixel_clock_mhz = 0;
8956 dm_con_state = to_dm_connector_state(connector->state);
8958 edid_check_required = false;
8959 if (!amdgpu_dm_connector->dc_sink) {
8960 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8963 if (!adev->dm.freesync_module)
 * If the EDID is non-zero, restrict FreeSync support to DP and eDP only.
8969 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8970 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8971 edid_check_required = is_dp_capable_without_timing_msa(
8973 amdgpu_dm_connector);
if (edid_check_required && (edid->version > 1 ||
8977 (edid->version == 1 && edid->revision > 1))) {
8978 for (i = 0; i < 4; i++) {
8980 timing = &edid->detailed_timings[i];
8981 data = &timing->data.other_data;
8982 range = &data->data.range;
 * Check if the monitor has a continuous frequency mode.
8986 if (data->type != EDID_DETAIL_MONITOR_RANGE)
 * Check for flag range limits only. If flag == 1 then
 * no additional timing information is provided.
 * Default GTF, GTF Secondary curve and CVT are not
 * supported.
8994 if (range->flags != 1)
8997 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8998 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8999 amdgpu_dm_connector->pixel_clock_mhz =
9000 range->pixel_clock_mhz * 10;
9004 if (amdgpu_dm_connector->max_vfreq -
9005 amdgpu_dm_connector->min_vfreq > 10) {
9007 freesync_capable = true;
9013 dm_con_state->freesync_capable = freesync_capable;
9015 if (connector->vrr_capable_property)
9016 drm_connector_set_vrr_capable_property(connector,
9020 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9022 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9024 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9026 if (link->type == dc_connection_none)
9028 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9029 dpcd_data, sizeof(dpcd_data))) {
9030 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9032 if (dpcd_data[0] == 0) {
9033 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9034 link->psr_settings.psr_feature_enabled = false;
9036 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9037 link->psr_settings.psr_feature_enabled = true;
9040 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9045 * amdgpu_dm_link_setup_psr() - configure psr link
9046 * @stream: stream state
9048 * Return: true if success
9050 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9052 struct dc_link *link = NULL;
9053 struct psr_config psr_config = {0};
9054 struct psr_context psr_context = {0};
9060 link = stream->link;
9062 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9064 if (psr_config.psr_version > 0) {
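		/*
		 * The values below configure the PSR state machine: link
		 * training is required on PSR exit, no frame-capture
		 * indication is requested, and SMU optimizations are
		 * disabled; 0x37 and 0x20 are the RFB setup time and SDP
		 * transmit-line deadline this driver programs.
		 */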
9065 psr_config.psr_exit_link_training_required = 0x1;
9066 psr_config.psr_frame_capture_indication_req = 0;
9067 psr_config.psr_rfb_setup_time = 0x37;
9068 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9069 psr_config.allow_smu_optimizations = 0x0;
9071 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9074 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9080 * amdgpu_dm_psr_enable() - enable psr f/w
9081 * @stream: stream state
9083 * Return: true if success
9085 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9087 struct dc_link *link = stream->link;
9088 unsigned int vsync_rate_hz = 0;
9089 struct dc_static_screen_params params = {0};
/* Calculate the number of static frames before generating an interrupt to
 * the hardware.
 */
// Initialize with a fail-safe default of 2 static frames
9094 unsigned int num_frames_static = 2;
9096 DRM_DEBUG_DRIVER("Enabling psr...\n");
9098 vsync_rate_hz = div64_u64(div64_u64((
9099 stream->timing.pix_clk_100hz * 100),
9100 stream->timing.v_total),
9101 stream->timing.h_total);
 * Calculate the number of frames such that at least 30 ms of time has
 * passed.
9107 if (vsync_rate_hz != 0) {
9108 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9109 num_frames_static = (30000 / frame_time_microsec) + 1;
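		/*
		 * E.g. at 60 Hz, frame_time_microsec = 16666, so with integer
		 * math num_frames_static = 30000 / 16666 + 1 = 2 frames
		 * (roughly 33 ms of static time).
		 */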
9112 params.triggers.cursor_update = true;
9113 params.triggers.overlay_update = true;
9114 params.triggers.surface_update = true;
9115 params.num_frames = num_frames_static;
9117 dc_stream_set_static_screen_params(link->ctx->dc,
9121 return dc_link_set_psr_allow_active(link, true, false);
9125 * amdgpu_dm_psr_disable() - disable psr f/w
9126 * @stream: stream state
9128 * Return: true if success
9130 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9133 DRM_DEBUG_DRIVER("Disabling psr...\n");
9135 return dc_link_set_psr_allow_active(stream->link, false, true);
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
9142 * Return: true if success
9144 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9146 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9147 return dc_set_psr_allow_active(dm->dc, false);
9150 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9152 struct amdgpu_device *adev = drm_to_adev(dev);
9153 struct dc *dc = adev->dm.dc;
9156 mutex_lock(&adev->dm.dc_lock);
9157 if (dc->current_state) {
9158 for (i = 0; i < dc->current_state->stream_count; ++i)
9159 dc->current_state->streams[i]
9160 ->triggered_crtc_reset.enabled =
9161 adev->dm.force_timing_sync;
9163 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9164 dc_trigger_sync(dc, dc->current_state);
9166 mutex_unlock(&adev->dm.dc_lock);