/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
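/*
 * The signed DMCUB firmware image wraps its payload in a PSP header and
 * footer; dm_dmub_sw_init() and dm_dmub_hw_init() below skip PSP_HEADER_BYTES
 * into the ucode array and subtract both constants when sizing the
 * instruction-constant region.
 */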
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);

/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
/**
 * dm_vblank_get_counter() - Get the counter for the number of vertical blanks
 * @adev: [in] desired amdgpu device
 * @crtc: [in] which CRTC to get the counter from
 *
 * Return: counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
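/*
 * Worked example of the reg-format packing above: h_position = 0x64 and
 * v_position = 0x32 yield *position = 0x32 | (0x64 << 16) = 0x00640032,
 * while *vbl carries v_blank_start in its low 16 bits and v_blank_end in
 * its high 16 bits.
 */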
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
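/*
 * The compressor buffer above is sized as max_size * 4, i.e. the largest
 * listed mode's htotal * vtotal at (presumably) 4 bytes per pixel, which
 * bounds the worst-case uncompressed frame FBC may need to back.
 */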
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
#endif
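/*
 * A note on the address encoding used by mmhub_read_system_context() above:
 * system aperture bounds are kept in 256KB units (>> 18), AGP bounds in 16MB
 * units (>> 24), and GART page table addresses in 4KB pages (>> 12), with
 * bits 47:44 of the page address stored in the low nibble of high_part.
 * For example, fb_start = 0x80000000 gives logical_addr_low = 0x2000, and
 * 0x2000ULL << 18 restores 0x80000000 in system_aperture.start_addr.
 */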
#ifdef CONFIG_DEBUG_FS
static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
{
	dm->crc_win_x_start_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_X_START", 0, U16_MAX);
	if (!dm->crc_win_x_start_property)
		return -ENOMEM;

	dm->crc_win_y_start_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_Y_START", 0, U16_MAX);
	if (!dm->crc_win_y_start_property)
		return -ENOMEM;

	dm->crc_win_x_end_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_X_END", 0, U16_MAX);
	if (!dm->crc_win_x_end_property)
		return -ENOMEM;

	dm->crc_win_y_end_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_Y_END", 0, U16_MAX);
	if (!dm->crc_win_y_end_property)
		return -ENOMEM;

	return 0;
}
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#ifdef CONFIG_DEBUG_FS
	if (create_crtc_crc_properties(&adev->dm))
		DRM_ERROR("amdgpu: failed to create crc property.\n");
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
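/*
 * The DMCU blob above is accounted as two ucode entries: ERAM, sized as
 * ucode_size_bytes minus intv_size_bytes, and the interrupt vector of
 * intv_size_bytes, each rounded up to PAGE_SIZE in adev->firmware.fw_size.
 */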
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return r;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
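/*
 * Sizing flow in dm_dmub_sw_init() above: dmub_srv_calc_region_info()
 * derives the total footprint from the inst_const, bss/data and VBIOS
 * sizes, a single VRAM buffer object is allocated to back it, and
 * dmub_srv_calc_fb_info() rebases each region onto that buffer's CPU and
 * GPU addresses for dm_dmub_hw_init() to consume.
 */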
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}
static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's DC implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Powerplay
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}


	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);


	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}
static void dm_set_dpms_off(struct dc_link *link)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}
2012 static int dm_resume(void *handle)
2014 struct amdgpu_device *adev = handle;
2015 struct drm_device *ddev = adev_to_drm(adev);
2016 struct amdgpu_display_manager *dm = &adev->dm;
2017 struct amdgpu_dm_connector *aconnector;
2018 struct drm_connector *connector;
2019 struct drm_connector_list_iter iter;
2020 struct drm_crtc *crtc;
2021 struct drm_crtc_state *new_crtc_state;
2022 struct dm_crtc_state *dm_new_crtc_state;
2023 struct drm_plane *plane;
2024 struct drm_plane_state *new_plane_state;
2025 struct dm_plane_state *dm_new_plane_state;
2026 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2027 enum dc_connection_type new_connection_type = dc_connection_none;
2028 struct dc_state *dc_state;
2031 if (amdgpu_in_reset(adev)) {
2032 dc_state = dm->cached_dc_state;
2034 r = dm_dmub_hw_init(adev);
2036 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2038 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2041 amdgpu_dm_irq_resume_early(adev);
2043 for (i = 0; i < dc_state->stream_count; i++) {
2044 dc_state->streams[i]->mode_changed = true;
2045 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2046 dc_state->stream_status->plane_states[j]->update_flags.raw
2051 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2053 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2055 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2057 dc_release_state(dm->cached_dc_state);
2058 dm->cached_dc_state = NULL;
2060 amdgpu_dm_irq_resume_late(adev);
2062 mutex_unlock(&dm->dc_lock);
2066 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2067 dc_release_state(dm_state->context);
2068 dm_state->context = dc_create_state(dm->dc);
2069 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2070 dc_resource_state_construct(dm->dc, dm_state->context);
2072 /* Before powering on DC we need to re-initialize DMUB. */
2073 r = dm_dmub_hw_init(adev);
2075 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2077 /* power on hardware */
2078 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2080 /* program HPD filter */
2084 * early enable HPD Rx IRQ, should be done before set mode as short
2085 * pulse interrupts are used for MST
2087 amdgpu_dm_irq_resume_early(adev);
2089 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2090 s3_handle_mst(ddev, false);
2093 drm_connector_list_iter_begin(ddev, &iter);
2094 drm_for_each_connector_iter(connector, &iter) {
2095 aconnector = to_amdgpu_dm_connector(connector);
2098 * this is the case when traversing through already created
2099 * MST connectors, should be skipped
2101 if (aconnector->mst_port)
2104 mutex_lock(&aconnector->hpd_lock);
2105 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2106 DRM_ERROR("KMS: Failed to detect connector\n");
2108 if (aconnector->base.force && new_connection_type == dc_connection_none)
2109 emulated_link_detect(aconnector->dc_link);
2111 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2113 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2114 aconnector->fake_enable = false;
2116 if (aconnector->dc_sink)
2117 dc_sink_release(aconnector->dc_sink);
2118 aconnector->dc_sink = NULL;
2119 amdgpu_dm_update_connector_after_detect(aconnector);
2120 mutex_unlock(&aconnector->hpd_lock);
2122 drm_connector_list_iter_end(&iter);
2124 /* Force mode set in atomic commit */
2125 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2126 new_crtc_state->active_changed = true;
	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
2133 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2134 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2135 if (dm_new_crtc_state->stream) {
2136 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2137 dc_stream_release(dm_new_crtc_state->stream);
2138 dm_new_crtc_state->stream = NULL;
2142 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2143 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2144 if (dm_new_plane_state->dc_state) {
2145 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2146 dc_plane_state_release(dm_new_plane_state->dc_state);
2147 dm_new_plane_state->dc_state = NULL;
2151 drm_atomic_helper_resume(ddev, dm->cached_state);
2153 dm->cached_state = NULL;
2155 amdgpu_dm_irq_resume_late(adev);
	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2166 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2167 * the base driver's device list to be initialized and torn down accordingly.
2169 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2172 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2174 .early_init = dm_early_init,
2175 .late_init = dm_late_init,
2176 .sw_init = dm_sw_init,
2177 .sw_fini = dm_sw_fini,
2178 .hw_init = dm_hw_init,
2179 .hw_fini = dm_hw_fini,
2180 .suspend = dm_suspend,
2181 .resume = dm_resume,
2182 .is_idle = dm_is_idle,
2183 .wait_for_idle = dm_wait_for_idle,
2184 .check_soft_reset = dm_check_soft_reset,
2185 .soft_reset = dm_soft_reset,
2186 .set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
2206 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2207 .fb_create = amdgpu_display_user_framebuffer_create,
2208 .get_format_info = amd_get_format_info,
2209 .output_poll_changed = drm_fb_helper_output_poll_changed,
2210 .atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
2214 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
2218 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2220 u32 max_cll, min_cll, max, min, q, r;
2221 struct amdgpu_dm_backlight_caps *caps;
2222 struct amdgpu_display_manager *dm;
2223 struct drm_connector *conn_base;
2224 struct amdgpu_device *adev;
2225 struct dc_link *link = NULL;
2226 static const u8 pre_computed_values[] = {
2227 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2228 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2230 if (!aconnector || !aconnector->dc_link)
2233 link = aconnector->dc_link;
2234 if (link->connector_signal != SIGNAL_TYPE_EDP)
2237 conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
2241 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2242 caps->aux_support = false;
2243 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2244 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2246 if (caps->ext_caps->bits.oled == 1 ||
2247 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2248 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2249 caps->aux_support = true;
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50 * 2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression would need floating-point precision;
	 * to avoid that complexity, we take advantage of the fact that CV is
	 * divided by a constant. By Euclid's division algorithm, CV can be
	 * written as CV = 32*q + r. Substituting CV into the luminance
	 * expression gives 50 * (2**q) * (2**(r/32)), so we only need to
	 * pre-compute the values of 50 * 2**(r/32). The table was generated
	 * with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results can be verified against pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
2268 max = (1 << q) * pre_computed_values[r];
2270 // min luminance: maxLum * (CV/255)^2 / 100
2271 q = DIV_ROUND_CLOSEST(min_cll, 255);
2272 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2274 caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
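/*
 * Worked example (illustrative values only): with max_cll = 65,
 * q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits. With
 * min_cll = 200, q = DIV_ROUND_CLOSEST(200, 255) = 1 and
 * min = 204 * DIV_ROUND_CLOSEST(1 * 1, 100) = 0, i.e. small min_cll values
 * round down to a zero minimum under this integer approximation.
 */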
2278 void amdgpu_dm_update_connector_after_detect(
2279 struct amdgpu_dm_connector *aconnector)
2281 struct drm_connector *connector = &aconnector->base;
2282 struct drm_device *dev = connector->dev;
2283 struct dc_sink *sink;
2285 /* MST handled by drm_mst framework */
2286 if (aconnector->mst_mgr.mst_state == true)
2289 sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);
	/*
	 * The EDID-managed connector gets its first update only in the
	 * mode_valid hook; the connector sink is then set to either a fake or
	 * a physical sink, depending on link status.
	 * Skip if this was already done during boot.
	 */
2298 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2299 && aconnector->dc_em_sink) {
		/*
		 * For S3 resume with a headless display, use the emulated sink
		 * (dc_em_sink) to fake a stream, because connector->sink is set
		 * to NULL on resume.
		 */
2305 mutex_lock(&dev->mode_config.mutex);
2308 if (aconnector->dc_sink) {
2309 amdgpu_dm_update_freesync_caps(connector, NULL);
			/*
			 * The retain and release below bump the sink refcount,
			 * because the link no longer points to it after
			 * disconnect; otherwise the next CRTC-to-connector
			 * reshuffle by userspace would trigger an unwanted
			 * dc_sink release.
			 */
2316 dc_sink_release(aconnector->dc_sink);
2318 aconnector->dc_sink = sink;
2319 dc_sink_retain(aconnector->dc_sink);
2320 amdgpu_dm_update_freesync_caps(connector,
2323 amdgpu_dm_update_freesync_caps(connector, NULL);
2324 if (!aconnector->dc_sink) {
2325 aconnector->dc_sink = aconnector->dc_em_sink;
2326 dc_sink_retain(aconnector->dc_sink);
2330 mutex_unlock(&dev->mode_config.mutex);
2333 dc_sink_release(sink);
	/*
	 * TODO: temporary guard while a proper fix is found:
	 * if this sink is an MST sink, we should not do anything here.
	 */
2341 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2342 dc_sink_release(sink);
2346 if (aconnector->dc_sink == sink) {
2348 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2351 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2352 aconnector->connector_id);
2354 dc_sink_release(sink);
2358 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2359 aconnector->connector_id, aconnector->dc_sink, sink);
2361 mutex_lock(&dev->mode_config.mutex);
	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
2369 * TODO: check if we still need the S3 mode update workaround.
2370 * If yes, put it here.
2372 if (aconnector->dc_sink)
2373 amdgpu_dm_update_freesync_caps(connector, NULL);
2375 aconnector->dc_sink = sink;
2376 dc_sink_retain(aconnector->dc_sink);
2377 if (sink->dc_edid.length == 0) {
2378 aconnector->edid = NULL;
2379 if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;
			drm_connector_update_edid_property(connector,
							   aconnector->edid);
2389 aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
2390 drm_connector_list_update(connector);
2392 if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
2397 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2398 update_connector_ext_caps(aconnector);
2400 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2401 amdgpu_dm_update_freesync_caps(connector, NULL);
2402 drm_connector_update_edid_property(connector, NULL);
2403 aconnector->num_modes = 0;
2404 dc_sink_release(aconnector->dc_sink);
2405 aconnector->dc_sink = NULL;
2406 aconnector->edid = NULL;
2407 #ifdef CONFIG_DRM_AMD_DC_HDCP
2408 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2409 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}
2414 mutex_unlock(&dev->mode_config.mutex);
2416 update_subconnector_property(aconnector);
2419 dc_sink_release(sink);
2422 static void handle_hpd_irq(void *param)
2424 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2425 struct drm_connector *connector = &aconnector->base;
2426 struct drm_device *dev = connector->dev;
2427 enum dc_connection_type new_connection_type = dc_connection_none;
2428 #ifdef CONFIG_DRM_AMD_DC_HDCP
2429 struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif

	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
2437 mutex_lock(&aconnector->hpd_lock);
2439 #ifdef CONFIG_DRM_AMD_DC_HDCP
2440 if (adev->dm.hdcp_workqueue) {
2441 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
2445 if (aconnector->fake_enable)
2446 aconnector->fake_enable = false;
2448 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2449 DRM_ERROR("KMS: Failed to detect connector\n");
2451 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2452 emulated_link_detect(aconnector->dc_link);
2455 drm_modeset_lock_all(dev);
2456 dm_restore_drm_connector_state(dev, connector);
2457 drm_modeset_unlock_all(dev);
2459 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2460 drm_kms_helper_hotplug_event(dev);
2462 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2463 if (new_connection_type == dc_connection_none &&
2464 aconnector->dc_link->type == dc_connection_none)
2465 dm_set_dpms_off(aconnector->dc_link);
2467 amdgpu_dm_update_connector_after_detect(aconnector);
2469 drm_modeset_lock_all(dev);
2470 dm_restore_drm_connector_state(dev, connector);
2471 drm_modeset_unlock_all(dev);
2473 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2474 drm_kms_helper_hotplug_event(dev);
2476 mutex_unlock(&aconnector->hpd_lock);
2480 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;
2488 const int max_process_count = 30;
2489 int process_count = 0;
2491 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2493 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2494 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2495 /* DPCD 0x200 - 0x201 for downstream IRQ */
2496 dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2499 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2500 dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);
2509 while (dret == dpcd_bytes_to_read &&
2510 process_count < max_process_count) {
2516 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2517 /* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);
2524 if (new_irq_handled) {
2525 /* ACK at DPCD to notify down stream */
2526 const int ack_dpcd_bytes_to_write =
2527 dpcd_bytes_to_read - 1;
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}
2541 /* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);
			new_irq_handled = false;
		} else
			break;
	}
2554 if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
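/*
 * For reference (informal summary of the loop above): the ESI registers at
 * DP_SINK_COUNT_ESI mirror the legacy 0x200 range, so esi[0] holds the sink
 * count and esi[1] the DEVICE_SERVICE_IRQ vector; the ack therefore writes
 * esi[1] onward back at dpcd_addr + 1 (0x201 or 0x2003) to clear the
 * short-pulse interrupt at the sink before polling for new requests.
 */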
2558 static void handle_hpd_rx_irq(void *param)
2560 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2561 struct drm_connector *connector = &aconnector->base;
2562 struct drm_device *dev = connector->dev;
2563 struct dc_link *dc_link = aconnector->dc_link;
2564 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2565 bool result = false;
2566 enum dc_connection_type new_connection_type = dc_connection_none;
2567 struct amdgpu_device *adev = drm_to_adev(dev);
2568 union hpd_irq_data hpd_irq_data;
2570 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
	/*
	 * TODO: temporary mutex to keep the HPD interrupt from conflicting
	 * over the GPIO; once an i2c helper is implemented, this mutex should
	 * be retired.
	 */
2578 mutex_lock(&aconnector->hpd_lock);
2580 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2582 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2583 (dc_link->type == dc_connection_mst_branch)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
			result = true;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			result = false;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		}
	}
2595 mutex_lock(&adev->dm.dc_lock);
2596 #ifdef CONFIG_DRM_AMD_DC_HDCP
2597 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
2601 mutex_unlock(&adev->dm.dc_lock);
out:
	if (result && !is_mst_root_connector) {
2605 /* Downstream Port status changed. */
2606 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2607 DRM_ERROR("KMS: Failed to detect connector\n");
2609 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2610 emulated_link_detect(dc_link);
2612 if (aconnector->fake_enable)
2613 aconnector->fake_enable = false;
2615 amdgpu_dm_update_connector_after_detect(aconnector);
2618 drm_modeset_lock_all(dev);
2619 dm_restore_drm_connector_state(dev, connector);
2620 drm_modeset_unlock_all(dev);
2622 drm_kms_helper_hotplug_event(dev);
2623 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2625 if (aconnector->fake_enable)
2626 aconnector->fake_enable = false;
2628 amdgpu_dm_update_connector_after_detect(aconnector);
2631 drm_modeset_lock_all(dev);
2632 dm_restore_drm_connector_state(dev, connector);
2633 drm_modeset_unlock_all(dev);
2635 drm_kms_helper_hotplug_event(dev);
2638 #ifdef CONFIG_DRM_AMD_DC_HDCP
2639 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2640 if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif
2645 if (dc_link->type != dc_connection_mst_branch) {
2646 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
2651 static void register_hpd_handlers(struct amdgpu_device *adev)
2653 struct drm_device *dev = adev_to_drm(adev);
2654 struct drm_connector *connector;
2655 struct amdgpu_dm_connector *aconnector;
2656 const struct dc_link *dc_link;
2657 struct dc_interrupt_params int_params = {0};
2659 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2660 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2662 list_for_each_entry(connector,
2663 &dev->mode_config.connector_list, head) {
2665 aconnector = to_amdgpu_dm_connector(connector);
2666 dc_link = aconnector->dc_link;
2668 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2669 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2670 int_params.irq_source = dc_link->irq_source_hpd;
			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
2677 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2679 /* Also register for DP short pulse (hpd_rx). */
2680 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2681 int_params.irq_source = dc_link->irq_source_hpd_rx;
			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}
2690 #if defined(CONFIG_DRM_AMD_DC_SI)
2691 /* Register IRQ sources and initialize IRQ callbacks */
2692 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2694 struct dc *dc = adev->dm.dc;
2695 struct common_irq_params *c_irq_params;
2696 struct dc_interrupt_params int_params = {0};
2699 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2701 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2702 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2705 * Actions of amdgpu_irq_add_id():
2706 * 1. Register a set() function with base driver.
2707 * Base driver will call set() function to enable/disable an
2708 * interrupt in DC hardware.
2709 * 2. Register amdgpu_dm_irq_handler().
2710 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2711 * coming from DC hardware.
2712 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2713 * for acknowledging and handling. */
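	/*
	 * In short (informal summary): amdgpu_irq_add_id() wires the hardware
	 * IRQ line into the base driver, while amdgpu_dm_irq_register_interrupt()
	 * attaches the DM-level callback (dm_crtc_high_irq, dm_pflip_high_irq,
	 * etc.) that amdgpu_dm_irq_handler() dispatches to for the matching DC
	 * interrupt source.
	 */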
2715 /* Use VBLANK interrupt */
2716 for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}
2723 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2724 int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2727 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2729 c_irq_params->adev = adev;
2730 c_irq_params->irq_src = int_params.irq_source;
2732 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2733 dm_crtc_high_irq, c_irq_params);
2736 /* Use GRPH_PFLIP interrupt */
2737 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2738 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}
2745 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2746 int_params.irq_source =
2747 dc_interrupt_to_irq_source(dc, i, 0);
2749 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2751 c_irq_params->adev = adev;
2752 c_irq_params->irq_src = int_params.irq_source;
2754 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2755 dm_pflip_high_irq, c_irq_params);
2760 r = amdgpu_irq_add_id(adev, client_id,
2761 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
2773 /* Register IRQ sources and initialize IRQ callbacks */
2774 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2776 struct dc *dc = adev->dm.dc;
2777 struct common_irq_params *c_irq_params;
2778 struct dc_interrupt_params int_params = {0};
2781 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2783 if (adev->asic_type >= CHIP_VEGA10)
2784 client_id = SOC15_IH_CLIENTID_DCE;
2786 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2787 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2790 * Actions of amdgpu_irq_add_id():
2791 * 1. Register a set() function with base driver.
2792 * Base driver will call set() function to enable/disable an
2793 * interrupt in DC hardware.
2794 * 2. Register amdgpu_dm_irq_handler().
2795 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2796 * coming from DC hardware.
2797 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2798 * for acknowledging and handling. */
2800 /* Use VBLANK interrupt */
2801 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2802 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}
2808 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2809 int_params.irq_source =
2810 dc_interrupt_to_irq_source(dc, i, 0);
2812 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2814 c_irq_params->adev = adev;
2815 c_irq_params->irq_src = int_params.irq_source;
2817 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2818 dm_crtc_high_irq, c_irq_params);
2821 /* Use VUPDATE interrupt */
2822 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2823 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}
2829 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2830 int_params.irq_source =
2831 dc_interrupt_to_irq_source(dc, i, 0);
2833 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2835 c_irq_params->adev = adev;
2836 c_irq_params->irq_src = int_params.irq_source;
2838 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2839 dm_vupdate_high_irq, c_irq_params);
2842 /* Use GRPH_PFLIP interrupt */
2843 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2844 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2845 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}
2851 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2852 int_params.irq_source =
2853 dc_interrupt_to_irq_source(dc, i, 0);
2855 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2857 c_irq_params->adev = adev;
2858 c_irq_params->irq_src = int_params.irq_source;
2860 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2861 dm_pflip_high_irq, c_irq_params);
2866 r = amdgpu_irq_add_id(adev, client_id,
2867 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
2878 #if defined(CONFIG_DRM_AMD_DC_DCN)
2879 /* Register IRQ sources and initialize IRQ callbacks */
2880 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2882 struct dc *dc = adev->dm.dc;
2883 struct common_irq_params *c_irq_params;
2884 struct dc_interrupt_params int_params = {0};
2888 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2889 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2892 * Actions of amdgpu_irq_add_id():
2893 * 1. Register a set() function with base driver.
2894 * Base driver will call set() function to enable/disable an
2895 * interrupt in DC hardware.
2896 * 2. Register amdgpu_dm_irq_handler().
2897 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2898 * coming from DC hardware.
2899 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2900 * for acknowledging and handling.
2903 /* Use VSTARTUP interrupt */
2904 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2905 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2907 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}
2914 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2915 int_params.irq_source =
2916 dc_interrupt_to_irq_source(dc, i, 0);
2918 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2920 c_irq_params->adev = adev;
2921 c_irq_params->irq_src = int_params.irq_source;
2923 amdgpu_dm_irq_register_interrupt(
2924 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2927 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2928 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2929 * to trigger at end of each vblank, regardless of state of the lock,
2930 * matching DCE behaviour.
2932 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2933 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2935 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}
2942 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2943 int_params.irq_source =
2944 dc_interrupt_to_irq_source(dc, i, 0);
2946 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2948 c_irq_params->adev = adev;
2949 c_irq_params->irq_src = int_params.irq_source;
2951 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2952 dm_vupdate_high_irq, c_irq_params);
2955 /* Use GRPH_PFLIP interrupt */
2956 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2957 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2959 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}
2965 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2966 int_params.irq_source =
2967 dc_interrupt_to_irq_source(dc, i, 0);
2969 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2971 c_irq_params->adev = adev;
2972 c_irq_params->irq_src = int_params.irq_source;
2974 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2975 dm_pflip_high_irq, c_irq_params);
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
2999 static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
3002 struct drm_device *dev = state->dev;
3003 struct amdgpu_device *adev = drm_to_adev(dev);
3004 struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}
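/*
 * Illustrative usage (sketch, not an actual call site in this file):
 *
 *	struct dm_atomic_state *dm_state;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * after which dm_state->context can safely be inspected and modified for the
 * commit being checked.
 */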
3019 static struct dm_atomic_state *
3020 dm_atomic_get_new_state(struct drm_atomic_state *state)
3022 struct drm_device *dev = state->dev;
3023 struct amdgpu_device *adev = drm_to_adev(dev);
3024 struct amdgpu_display_manager *dm = &adev->dm;
3025 struct drm_private_obj *obj;
3026 struct drm_private_state *new_obj_state;
3029 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3030 if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}
3037 static struct drm_private_state *
3038 dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}
3061 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3062 struct drm_private_state *state)
3064 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3066 if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}
3072 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3073 .atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
3077 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3079 struct dm_atomic_state *state;
3082 adev->mode_info.mode_config_initialized = true;
3084 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3085 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3087 adev_to_drm(adev)->mode_config.max_width = 16384;
3088 adev_to_drm(adev)->mode_config.max_height = 16384;
3090 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3091 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3092 /* indicates support for immediate flip */
3093 adev_to_drm(adev)->mode_config.async_page_flip = true;
3095 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}
3107 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3109 drm_atomic_private_obj_init(adev_to_drm(adev),
3110 &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);
3114 r = amdgpu_display_modeset_create_props(adev);
3116 dc_release_state(state->context);
3121 r = amdgpu_dm_audio_init(adev);
3123 dc_release_state(state->context);
3131 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3132 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3133 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3135 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3136 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3138 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3140 #if defined(CONFIG_ACPI)
3141 struct amdgpu_dm_backlight_caps caps;
3143 memset(&caps, 0, sizeof(caps));
	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
3156 dm->backlight_caps.min_input_signal =
3157 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3158 dm->backlight_caps.max_input_signal =
			AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
3170 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3177 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3178 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3183 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3184 unsigned *min, unsigned *max)
3189 if (caps->aux_support) {
3190 // Firmware limits are in nits, DC API wants millinits.
3191 *max = 1000 * caps->aux_max_input_signal;
3192 *min = 1000 * caps->aux_min_input_signal;
3194 // Firmware limits are 8-bit, PWM control is 16-bit.
3195 *max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}

	return 1;
}
3201 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}
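/*
 * Worked example (illustrative, using the default PWM limits): with
 * min_input_signal = 12 and max_input_signal = 255, the range becomes
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) =
 * 34432.
 */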
3214 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}
3229 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3231 struct amdgpu_display_manager *dm = bl_get_data(bd);
3232 struct amdgpu_dm_backlight_caps caps;
3233 struct dc_link *link = NULL;
3237 amdgpu_dm_update_backlight_caps(dm);
3238 caps = dm->backlight_caps;
3240 link = (struct dc_link *)dm->backlight_link;
3242 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3243 // Change brightness based on AUX property
3244 if (caps.aux_support)
3245 return set_backlight_via_aux(link, brightness);
	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}
3252 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3254 struct amdgpu_display_manager *dm = bl_get_data(bd);
3255 int ret = dc_link_get_backlight_level(dm->backlight_link);
3257 if (ret == DC_ERROR_UNEXPECTED)
3258 return bd->props.brightness;
	return convert_brightness_to_user(&dm->backlight_caps, ret);
}
3262 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3263 .options = BL_CORE_SUSPENDRESUME,
3264 .get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};
3269 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3272 struct backlight_properties props = { 0 };
3274 amdgpu_dm_update_backlight_caps(dm);
3276 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3277 props.brightness = AMDGPU_MAX_BL_LEVEL;
3278 props.type = BACKLIGHT_RAW;
3280 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3281 adev_to_drm(dm->adev)->primary->index);
3283 dm->backlight_dev = backlight_device_register(bl_name,
3284 adev_to_drm(dm->adev)->dev,
3286 &amdgpu_dm_backlight_ops,
3289 if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
3297 static int initialize_plane(struct amdgpu_display_manager *dm,
3298 struct amdgpu_mode_info *mode_info, int plane_id,
3299 enum drm_plane_type plane_type,
3300 const struct dc_plane_cap *plane_cap)
3302 struct drm_plane *plane;
3303 unsigned long possible_crtcs;
3306 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3308 DRM_ERROR("KMS: Failed to allocate plane\n");
3311 plane->type = plane_type;
3314 * HACK: IGT tests expect that the primary plane for a CRTC
3315 * can only have one possible CRTC. Only expose support for
3316 * any CRTC if they're not going to be used as a primary plane
3317 * for a CRTC - like overlay or underlay planes.
3319 possible_crtcs = 1 << plane_id;
3320 if (plane_id >= dm->dc->caps.max_streams)
3321 possible_crtcs = 0xff;
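	/*
	 * Illustrative example: on an ASIC with max_streams = 4, primary
	 * plane 2 gets possible_crtcs = 1 << 2 = 0x4 (CRTC 2 only), while an
	 * overlay plane with plane_id >= 4 gets 0xff (any CRTC).
	 */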
3323 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3326 DRM_ERROR("KMS: Failed to initialize plane\n");
3332 mode_info->planes[plane_id] = plane;
3338 static void register_backlight_device(struct amdgpu_display_manager *dm,
3339 struct dc_link *link)
3341 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3342 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3344 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3345 link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization, because not having backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);
3351 amdgpu_dm_register_backlight_device(dm);
3353 if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
/*
 * In this architecture, the association
 *	connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component.
 *
 * Returns 0 on success
 */
3368 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3370 struct amdgpu_display_manager *dm = &adev->dm;
3372 struct amdgpu_dm_connector *aconnector = NULL;
3373 struct amdgpu_encoder *aencoder = NULL;
3374 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3376 int32_t primary_planes;
3377 enum dc_connection_type new_connection_type = dc_connection_none;
3378 const struct dc_plane_cap *plane;
3380 dm->display_indexes_num = dm->dc->caps.max_streams;
3381 /* Update the actual used number of crtc */
3382 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3384 link_cnt = dm->dc->caps.max_links;
3385 if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}
3390 /* There is one primary plane per CRTC */
3391 primary_planes = dm->dc->caps.max_streams;
3392 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3395 * Initialize primary planes, implicit planes for legacy IOCTLS.
3396 * Order is reversed to match iteration order in atomic check.
3398 for (i = (primary_planes - 1); i >= 0; i--) {
3399 plane = &dm->dc->caps.planes[i];
3401 if (initialize_plane(dm, mode_info, i,
3402 DRM_PLANE_TYPE_PRIMARY, plane)) {
3403 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3409 * Initialize overlay planes, index starting after primary planes.
3410 * These planes have a higher DRM index than the primary planes since
3411 * they should be considered as having a higher z-order.
3412 * Order is reversed to match iteration order in atomic check.
3414 * Only support DCN for now, and only expose one so we don't encourage
3415 * userspace to use up all the pipes.
3417 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3418 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3420 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3423 if (!plane->blends_with_above || !plane->blends_with_below)
3426 if (!plane->pixel_format_support.argb8888)
3429 if (initialize_plane(dm, NULL, primary_planes + i,
3430 DRM_PLANE_TYPE_OVERLAY, plane)) {
3431 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3435 /* Only create one overlay plane. */
3439 for (i = 0; i < dm->dc->caps.max_streams; i++)
3440 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3441 DRM_ERROR("KMS: Failed to initialize crtc\n");
3445 /* loops over all connectors on the board */
3446 for (i = 0; i < link_cnt; i++) {
3447 struct dc_link *link = NULL;
3449 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3451 "KMS: Cannot support more than %d display indexes\n",
3452 AMDGPU_DM_MAX_DISPLAY_INDEX);
3456 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3460 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3464 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3465 DRM_ERROR("KMS: Failed to initialize encoder\n");
3469 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3470 DRM_ERROR("KMS: Failed to initialize connector\n");
3474 link = dc_get_link_at_index(dm->dc, i);
3476 if (!dc_link_detect_sink(link, &new_connection_type))
3477 DRM_ERROR("KMS: Failed to detect connector\n");
3479 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3480 emulated_link_detect(link);
3481 amdgpu_dm_update_connector_after_detect(aconnector);
3483 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3484 amdgpu_dm_update_connector_after_detect(aconnector);
3485 register_backlight_device(dm, link);
3486 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3487 amdgpu_dm_set_psr_caps(link);
3493 /* Software is initialized. Now we can register interrupt handlers. */
3494 switch (adev->asic_type) {
3495 #if defined(CONFIG_DRM_AMD_DC_SI)
3500 if (dce60_register_irq_handlers(dm->adev)) {
3501 DRM_ERROR("DM: Failed to initialize IRQ\n");
3515 case CHIP_POLARIS11:
3516 case CHIP_POLARIS10:
3517 case CHIP_POLARIS12:
3522 if (dce110_register_irq_handlers(dm->adev)) {
3523 DRM_ERROR("DM: Failed to initialize IRQ\n");
3527 #if defined(CONFIG_DRM_AMD_DC_DCN)
3533 case CHIP_SIENNA_CICHLID:
3534 case CHIP_NAVY_FLOUNDER:
3535 case CHIP_DIMGREY_CAVEFISH:
3537 if (dcn10_register_irq_handlers(dm->adev)) {
3538 DRM_ERROR("DM: Failed to initialize IRQ\n");
3544 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3556 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}
3563 /******************************************************************************
3564 * amdgpu_display_funcs functions
3565 *****************************************************************************/
3568 * dm_bandwidth_update - program display watermarks
3570 * @adev: amdgpu_device pointer
3572 * Calculate and program the display watermarks and line buffer allocation.
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
3579 static const struct amdgpu_display_funcs dm_display_funcs = {
3580 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3581 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3582 .backlight_set_level = NULL, /* never called for DC */
3583 .backlight_get_level = NULL, /* never called for DC */
3584 .hpd_sense = NULL,/* called unconditionally */
3585 .hpd_set_polarity = NULL, /* called unconditionally */
3586 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3587 .page_flip_get_scanoutpos =
3588 dm_crtc_get_scanoutpos,/* called unconditionally */
3589 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3593 #if defined(CONFIG_DEBUG_KERNEL_DC)
3595 static ssize_t s3_debug_store(struct device *device,
3596 struct device_attribute *attr,
3602 struct drm_device *drm_dev = dev_get_drvdata(device);
3603 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3605 ret = kstrtoint(buf, 0, &s3_state);
3610 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3615 return ret == 0 ? count : 0;
3618 DEVICE_ATTR_WO(s3_debug);
3622 static int dm_early_init(void *handle)
3624 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3626 switch (adev->asic_type) {
3627 #if defined(CONFIG_DRM_AMD_DC_SI)
3631 adev->mode_info.num_crtc = 6;
3632 adev->mode_info.num_hpd = 6;
3633 adev->mode_info.num_dig = 6;
3636 adev->mode_info.num_crtc = 2;
3637 adev->mode_info.num_hpd = 2;
3638 adev->mode_info.num_dig = 2;
3643 adev->mode_info.num_crtc = 6;
3644 adev->mode_info.num_hpd = 6;
3645 adev->mode_info.num_dig = 6;
3648 adev->mode_info.num_crtc = 4;
3649 adev->mode_info.num_hpd = 6;
3650 adev->mode_info.num_dig = 7;
3654 adev->mode_info.num_crtc = 2;
3655 adev->mode_info.num_hpd = 6;
3656 adev->mode_info.num_dig = 6;
3660 adev->mode_info.num_crtc = 6;
3661 adev->mode_info.num_hpd = 6;
3662 adev->mode_info.num_dig = 7;
3665 adev->mode_info.num_crtc = 3;
3666 adev->mode_info.num_hpd = 6;
3667 adev->mode_info.num_dig = 9;
3670 adev->mode_info.num_crtc = 2;
3671 adev->mode_info.num_hpd = 6;
3672 adev->mode_info.num_dig = 9;
3674 case CHIP_POLARIS11:
3675 case CHIP_POLARIS12:
3676 adev->mode_info.num_crtc = 5;
3677 adev->mode_info.num_hpd = 5;
3678 adev->mode_info.num_dig = 5;
3680 case CHIP_POLARIS10:
3682 adev->mode_info.num_crtc = 6;
3683 adev->mode_info.num_hpd = 6;
3684 adev->mode_info.num_dig = 6;
3689 adev->mode_info.num_crtc = 6;
3690 adev->mode_info.num_hpd = 6;
3691 adev->mode_info.num_dig = 6;
3693 #if defined(CONFIG_DRM_AMD_DC_DCN)
3697 adev->mode_info.num_crtc = 4;
3698 adev->mode_info.num_hpd = 4;
3699 adev->mode_info.num_dig = 4;
3703 case CHIP_SIENNA_CICHLID:
3704 case CHIP_NAVY_FLOUNDER:
3705 adev->mode_info.num_crtc = 6;
3706 adev->mode_info.num_hpd = 6;
3707 adev->mode_info.num_dig = 6;
3710 case CHIP_DIMGREY_CAVEFISH:
3711 adev->mode_info.num_crtc = 5;
3712 adev->mode_info.num_hpd = 5;
3713 adev->mode_info.num_dig = 5;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}
3721 amdgpu_dm_set_irq_funcs(adev);
3723 if (adev->mode_info.funcs == NULL)
3724 adev->mode_info.funcs = &dm_display_funcs;
3727 * Note: Do NOT change adev->audio_endpt_rreg and
3728 * adev->audio_endpt_wreg because they are initialised in
3729 * amdgpu_device_init()
3731 #if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
3740 static bool modeset_required(struct drm_crtc_state *crtc_state,
3741 struct dc_stream_state *new_stream,
3742 struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
3763 static int fill_dc_scaling_info(const struct drm_plane_state *state,
			       struct dc_scaling_info *scaling_info)
{
3766 int scale_w, scale_h;
3768 memset(scaling_info, 0, sizeof(*scaling_info));
3770 /* Source is fixed 16.16 but we ignore mantissa for now... */
3771 scaling_info->src_rect.x = state->src_x >> 16;
3772 scaling_info->src_rect.y = state->src_y >> 16;
	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;
3795 /* DRM doesn't specify clipping on destination output. */
3796 scaling_info->clip_rect = scaling_info->dst_rect;
3798 /* TODO: Validate scaling per-format with DC plane caps */
	scale_w = scaling_info->dst_rect.width * 1000 /
			scaling_info->src_rect.width;

	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
			scaling_info->src_rect.height;

	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;
	/*
	 * The "scaling_quality" can be ignored for now; quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
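/*
 * Worked example (illustrative): a 1920x1080 plane (src_w = 1920 << 16 in
 * DRM's 16.16 fixed point) scanned out at 960x540 yields
 * scale_w = 960 * 1000 / 1920 = 500 and scale_h = 540 * 1000 / 1080 = 500,
 * i.e. a 0.5x downscale, comfortably inside the allowed 250..16000
 * (0.25x..16x) window.
 */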
3820 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3821 uint64_t tiling_flags)
3823 /* Fill GFX8 params */
3824 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3825 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3827 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3828 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3829 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3830 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3831 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3833 /* XXX fix me for VI */
3834 tiling_info->gfx8.num_banks = num_banks;
3835 tiling_info->gfx8.array_mode =
3836 DC_ARRAY_2D_TILED_THIN1;
3837 tiling_info->gfx8.tile_split = tile_split;
3838 tiling_info->gfx8.bank_width = bankw;
3839 tiling_info->gfx8.bank_height = bankh;
3840 tiling_info->gfx8.tile_aspect = mtaspect;
3841 tiling_info->gfx8.tile_mode =
3842 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3843 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3844 == DC_ARRAY_1D_TILED_THIN1) {
3845 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3848 tiling_info->gfx8.pipe_config =
3849 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3853 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3854 union dc_tiling_info *tiling_info)
3856 tiling_info->gfx9.num_pipes =
3857 adev->gfx.config.gb_addr_config_fields.num_pipes;
3858 tiling_info->gfx9.num_banks =
3859 adev->gfx.config.gb_addr_config_fields.num_banks;
3860 tiling_info->gfx9.pipe_interleave =
3861 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3862 tiling_info->gfx9.num_shader_engines =
3863 adev->gfx.config.gb_addr_config_fields.num_se;
3864 tiling_info->gfx9.max_compressed_frags =
3865 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3866 tiling_info->gfx9.num_rb_per_se =
3867 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3868 tiling_info->gfx9.shaderEnable = 1;
3869 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3870 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3871 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3872 adev->asic_type == CHIP_VANGOGH)
3873 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3877 validate_dcc(struct amdgpu_device *adev,
3878 const enum surface_pixel_format format,
3879 const enum dc_rotation_angle rotation,
3880 const union dc_tiling_info *tiling_info,
3881 const struct dc_plane_dcc_param *dcc,
3882 const struct dc_plane_address *address,
3883 const struct plane_size *plane_size)
3885 struct dc *dc = adev->dm.dc;
3886 struct dc_dcc_surface_param input;
3887 struct dc_surface_dcc_cap output;
	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;
3899 input.format = format;
3900 input.surface_size.width = plane_size->surface_size.width;
3901 input.surface_size.height = plane_size->surface_size.height;
3902 input.swizzle_mode = tiling_info->gfx9.swizzle;
3904 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3905 input.scan = SCAN_DIRECTION_HORIZONTAL;
3906 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3907 input.scan = SCAN_DIRECTION_VERTICAL;
	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}
static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}
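/*
 * Illustrative: for a modifier assembled as
 *	AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | ...
 * AMD_FMT_MOD_GET(TILE, modifier) recovers AMD_FMT_MOD_TILE_GFX9_64K_S_X,
 * whose low two bits select the micro-tile class (Z/S/D/R) that
 * dm_plane_format_mod_supported() below inspects via enum dm_micro_swizzle.
 */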
3937 static const struct drm_format_info *
3938 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
3944 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3945 union dc_tiling_info *tiling_info,
3948 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3949 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3950 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3951 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3953 fill_gfx9_tiling_info_from_device(adev, tiling_info);
	if (!IS_AMD_FMT_MOD(modifier))
		return;
3958 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3959 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3961 if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
3964 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3966 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3970 enum dm_micro_swizzle {
3971 MICRO_SWIZZLE_Z = 0,
3972 MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3,
};
static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
3981 struct amdgpu_device *adev = drm_to_adev(plane->dev);
3982 const struct drm_format_info *info = drm_format_info(format);
	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow this modifier, because core DRM still
	 * checks LINEAR support if userspace does not provide modifiers.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	/*
	 * The arbitrary tiling support for multi-plane formats has not been
	 * hooked up yet.
	 */
	if (info->num_planes > 1)
		return false;
	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
	}

	return true;
}

static void
4027 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}
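/*
 * Informal note: add_modifier() is a grow-by-doubling append, so building a
 * list of N modifiers costs amortized O(N) copies starting from the initial
 * capacity chosen in get_plane_modifiers() (128 entries). A typical call:
 *
 *	add_modifier(mods, size, capacity, DRM_FORMAT_MOD_LINEAR);
 */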
4053 add_gfx9_modifiers(const struct amdgpu_device *adev,
4054 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4056 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4057 int pipe_xor_bits = min(8, pipes +
4058 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4059 int bank_xor_bits = min(8 - pipe_xor_bits,
4060 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4061 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4062 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4065 if (adev->family == AMDGPU_FAMILY_RV) {
4066 /* Raven2 and later */
4067 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4070 * No _D DCC swizzles yet because we only allow 32bpp, which
4071 * doesn't support _D on DCN
4074 if (has_constant_encode) {
4075 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4076 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4077 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4078 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4079 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4080 AMD_FMT_MOD_SET(DCC, 1) |
4081 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4082 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4083 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4086 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4087 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4088 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4089 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4090 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4091 AMD_FMT_MOD_SET(DCC, 1) |
4092 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4093 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4094 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4096 if (has_constant_encode) {
4097 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4098 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4099 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4100 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4101 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4102 AMD_FMT_MOD_SET(DCC, 1) |
4103 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4104 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4105 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4107 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4108 AMD_FMT_MOD_SET(RB, rb) |
4109 AMD_FMT_MOD_SET(PIPE, pipes));
4112 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4113 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4114 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4115 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4116 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4117 AMD_FMT_MOD_SET(DCC, 1) |
4118 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4119 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4120 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4121 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4122 AMD_FMT_MOD_SET(RB, rb) |
4123 AMD_FMT_MOD_SET(PIPE, pipes));
4127 * Only supported for 64bpp on Raven, will be filtered on format in
4128 * dm_plane_format_mod_supported.
4130 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4131 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4132 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4133 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4134 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4136 if (adev->family == AMDGPU_FAMILY_RV) {
4137 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4138 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4139 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4140 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4141 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4145 * Only supported for 64bpp on Raven, will be filtered on format in
4146 * dm_plane_format_mod_supported.
4148 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4149 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4150 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4152 if (adev->family == AMDGPU_FAMILY_RV) {
4153 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4154 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4155 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(PACKERS, pkrs) |
			AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(PACKERS, pkrs) |
			AMD_FMT_MOD_SET(DCC, 1) |
			AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
			AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
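
/*
 * Assemble the per-plane format-modifier list for the current ASIC. The
 * helpers above append entries roughly in order of preference; LINEAR is
 * always added as a universal fallback and DRM_FORMAT_MOD_INVALID
 * terminates the list. For illustration only (a hypothetical caller, not
 * the actual call site), the list would be consumed roughly like:
 *
 *	uint64_t *mods;
 *	if (!get_plane_modifiers(adev, DRM_PLANE_TYPE_PRIMARY, &mods)) {
 *		// ... walk entries until DRM_FORMAT_MOD_INVALID ...
 *		kfree(mods);
 *	}
 */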
static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	return *mods ? 0 : -ENOMEM;
}
static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		return ret;

	return 0;
}
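
/*
 * Translate a drm_framebuffer into the DC plane attributes: surface/chroma
 * sizes and pitches, the address union, tiling info and (on GFX9+) DCC
 * derived from the framebuffer's format modifier. Pitches are converted
 * from bytes to pixels by dividing by the format's bytes-per-pixel, e.g. a
 * 7680-byte pitch for XRGB8888 (4 cpp) becomes a surface_pitch of 1920.
 */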
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}
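
/*
 * Map the plane's DRM color-encoding/range properties onto a DC color
 * space. RGB formats are unaffected and stay sRGB; YCbCr formats pick
 * BT.601/709/2020 and fall back to the _LIMITED variant whenever the plane
 * does not use full range.
 */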
static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * multiple times.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}
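
/*
 * Apply the connector's RMX scaling mode to the stream's src/dst rects.
 * Worked example (illustrative): a 1280x1024 mode on a 1920x1080 panel with
 * RMX_ASPECT keeps the 5:4 ratio, so dst becomes 1350x1080 and is centered
 * at x = (1920 - 1350) / 2 = 285; underscan borders then shrink dst further.
 */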
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			 dst.x, dst.y, dst.width, dst.height);
}
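
/*
 * Pick the stream color depth from the sink's EDID-reported bpc, capped by
 * the connector's max_requested_bpc and, for YCbCr 4:2:0, by the deep-color
 * bits in the HDMI HF-VSDB. The user cap is rounded down to an even value,
 * so e.g. a requested_bpc of 11 against a 12-bit sink yields 10 bpc.
 */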
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround, DRM doesn't parse color depth for
		 * EDID revisions before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}
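
/*
 * HDMI TMDS clock scales with color depth: 10/12/16 bpc cost 30/36/48 bits
 * per pixel versus 24 at 8 bpc. E.g. a 300 MHz pixel clock needs 450 MHz at
 * 12 bpc and 375 MHz at 10 bpc, so a sink with a 340 MHz max_tmds_clock
 * forces the loop below to step the depth down until 8 bpc (300 MHz) fits.
 */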
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}
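
/*
 * Build a dc_stream_state for a connector: pick the preferred (native)
 * mode, fill timing and color properties, negotiate DSC on DP sinks that
 * support it, and attach audio and PSR/VSC infopacket state. A fake sink
 * is substituted when nothing is connected so headless code paths still
 * get a usable stream.
 */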
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
	uint32_t link_bandwidth_kbps;
#endif
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
			/* Set DSC policy according to dsc_clock_en */
			dc_dsc_policy_set_enable_dsc_when_not_needed(
				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  0,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
			/* Overwrite the stream flag if DSC is enabled through debugfs */
			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
				stream->timing.flags.DSC = 1;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
		}
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports the VSC SDP colorimetry
		 * capability before building the VSC info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO Destroy dc_stream objects when the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
#ifdef CONFIG_DEBUG_FS
	state->crc_window = cur->crc_window;
#endif
	/* TODO Duplicate dc_stream after the stream object is flattened */

	return &state->base;
}
#ifdef CONFIG_DEBUG_FS
static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
					      struct drm_crtc_state *crtc_state,
					      struct drm_property *property,
					      uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_crtc_state *dm_new_state =
		to_dm_crtc_state(crtc_state);

	if (property == adev->dm.crc_win_x_start_property)
		dm_new_state->crc_window.x_start = val;
	else if (property == adev->dm.crc_win_y_start_property)
		dm_new_state->crc_window.y_start = val;
	else if (property == adev->dm.crc_win_x_end_property)
		dm_new_state->crc_window.x_end = val;
	else if (property == adev->dm.crc_win_y_end_property)
		dm_new_state->crc_window.y_end = val;
	else
		return -EINVAL;

	return 0;
}

static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
					      const struct drm_crtc_state *state,
					      struct drm_property *property,
					      uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_crtc_state *dm_state =
		to_dm_crtc_state(state);

	if (property == adev->dm.crc_win_x_start_property)
		*val = dm_state->crc_window.x_start;
	else if (property == adev->dm.crc_win_y_start_property)
		*val = dm_state->crc_window.y_start;
	else if (property == adev->dm.crc_win_x_end_property)
		*val = dm_state->crc_window.x_end;
	else if (property == adev->dm.crc_win_y_end_property)
		*val = dm_state->crc_window.y_end;
	else
		return -EINVAL;

	return 0;
}
#endif
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}
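
/*
 * VUPDATE interrupts are only needed while VRR is active, so they are
 * enabled and disabled together with the vblank interrupt below rather
 * than being left on permanently.
 */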
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#ifdef CONFIG_DEBUG_FS
	.atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
	.atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
#endif
};
static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}
int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
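
/*
 * Wrap create_stream_for_sink() with DC validation, retrying at lower
 * color depths until the mode fits. E.g. a mode that fails validation at
 * the requested 10 bpc is retried at 8 and then 6 bpc before giving up.
 */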
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	return stream;
}
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
				aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}
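
/*
 * Pack the connector's HDR static metadata into a DC info packet. The DRM
 * infoframe is a fixed 30 bytes (4-byte header + 26-byte payload); only
 * the header layout differs between HDMI (infoframe type 0x87) and the
 * DP/eDP SDP encapsulation built below.
 */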
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_connector_atomic_check(new_con_state);

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * are missing when the user starts lightdm. So we need to renew the modes
	 * list in the get_modes callback, rather than just return the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
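
/*
 * Count the non-cursor planes with a framebuffer attached to this CRTC.
 * Planes that are not part of the atomic state already passed validation
 * and are counted as active; the result is cached on the CRTC state as
 * active_planes and consulted by dm_crtc_helper_atomic_check() below.
 */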
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
					 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);
}
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	dm_update_crtc_active_planes(crtc, crtc_state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
		return -EINVAL;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
	return ret;
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}

static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
6193 #if defined(CONFIG_DRM_AMD_DC_DCN)
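/*
 * Walk the connectors in the atomic state, match each MST connector to its
 * DC stream, and enable or disable DSC on the MST port to match the
 * stream's DSC flag, recomputing PBN/VCPI slots from the DSC-configured
 * bits_per_pixel and pixel clock when DSC is in use.
 */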
6194 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6195 struct dc_state *dc_state)
6197 struct dc_stream_state *stream = NULL;
6198 struct drm_connector *connector;
6199 struct drm_connector_state *new_con_state, *old_con_state;
6200 struct amdgpu_dm_connector *aconnector;
6201 struct dm_connector_state *dm_conn_state;
6202 int i, j, clock, bpp;
6203 int vcpi, pbn_div, pbn = 0;
6205 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6207 aconnector = to_amdgpu_dm_connector(connector);
6209 if (!aconnector->port)
6212 if (!new_con_state || !new_con_state->crtc)
6215 dm_conn_state = to_dm_connector_state(new_con_state);
6217 for (j = 0; j < dc_state->stream_count; j++) {
6218 stream = dc_state->streams[j];
6222 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6231 if (stream->timing.flags.DSC != 1) {
6232 drm_dp_mst_atomic_enable_dsc(state,
6240 pbn_div = dm_mst_get_pbn_divider(stream->link);
6241 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6242 clock = stream->timing.pix_clk_100hz / 10;
6243 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6244 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6251 dm_conn_state->pbn = pbn;
6252 dm_conn_state->vcpi_slots = vcpi;
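/* Release the current plane state and install a fresh zeroed DM state. */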
6258 static void dm_drm_plane_reset(struct drm_plane *plane)
6260 struct dm_plane_state *amdgpu_state = NULL;
6263 plane->funcs->atomic_destroy_state(plane, plane->state);
6265 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6266 WARN_ON(amdgpu_state == NULL);
6269 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6272 static struct drm_plane_state *
6273 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6275 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6277 old_dm_plane_state = to_dm_plane_state(plane->state);
6278 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6279 if (!dm_plane_state)
6282 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6284 if (old_dm_plane_state->dc_state) {
6285 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6286 dc_plane_state_retain(dm_plane_state->dc_state);
6289 return &dm_plane_state->base;
6292 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6293 struct drm_plane_state *state)
6295 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6297 if (dm_plane_state->dc_state)
6298 dc_plane_state_release(dm_plane_state->dc_state);
6300 drm_atomic_helper_plane_destroy_state(plane, state);
6303 static const struct drm_plane_funcs dm_plane_funcs = {
6304 .update_plane = drm_atomic_helper_update_plane,
6305 .disable_plane = drm_atomic_helper_disable_plane,
6306 .destroy = drm_primary_helper_destroy,
6307 .reset = dm_drm_plane_reset,
6308 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6309 .atomic_destroy_state = dm_drm_plane_destroy_state,
6310 .format_mod_supported = dm_plane_format_mod_supported,
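/*
 * Pin the framebuffer BO into a displayable domain (VRAM for cursors) and
 * bind it into GART so its GPU address is known before the plane is
 * programmed; also fill DC buffer attributes for newly created planes.
 */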
6313 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6314 struct drm_plane_state *new_state)
6316 struct amdgpu_framebuffer *afb;
6317 struct drm_gem_object *obj;
6318 struct amdgpu_device *adev;
6319 struct amdgpu_bo *rbo;
6320 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6321 struct list_head list;
6322 struct ttm_validate_buffer tv;
6323 struct ww_acquire_ctx ticket;
6327 if (!new_state->fb) {
6328 DRM_DEBUG_DRIVER("No FB bound\n");
6332 afb = to_amdgpu_framebuffer(new_state->fb);
6333 obj = new_state->fb->obj[0];
6334 rbo = gem_to_amdgpu_bo(obj);
6335 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6336 INIT_LIST_HEAD(&list);
6340 list_add(&tv.head, &list);
6342 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6344 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6348 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6349 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6351 domain = AMDGPU_GEM_DOMAIN_VRAM;
6353 r = amdgpu_bo_pin(rbo, domain);
6354 if (unlikely(r != 0)) {
6355 if (r != -ERESTARTSYS)
6356 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6357 ttm_eu_backoff_reservation(&ticket, &list);
6361 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6362 if (unlikely(r != 0)) {
6363 amdgpu_bo_unpin(rbo);
6364 ttm_eu_backoff_reservation(&ticket, &list);
6365 DRM_ERROR("%p bind failed\n", rbo);
6369 ttm_eu_backoff_reservation(&ticket, &list);
6371 afb->address = amdgpu_bo_gpu_offset(rbo);
6376 * We don't do surface updates on planes that have been newly created,
6377 * but we also don't have the afb->address during atomic check.
6379 * Fill in buffer attributes depending on the address here, but only on
6380 * newly created planes since they're not being used by DC yet and this
6381 * won't modify global state.
6383 dm_plane_state_old = to_dm_plane_state(plane->state);
6384 dm_plane_state_new = to_dm_plane_state(new_state);
6386 if (dm_plane_state_new->dc_state &&
6387 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6388 struct dc_plane_state *plane_state =
6389 dm_plane_state_new->dc_state;
6390 bool force_disable_dcc = !plane_state->dcc.enable;
6392 fill_plane_buffer_attributes(
6393 adev, afb, plane_state->format, plane_state->rotation,
6395 &plane_state->tiling_info, &plane_state->plane_size,
6396 &plane_state->dcc, &plane_state->address,
6397 afb->tmz_surface, force_disable_dcc);
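/* Unpin and drop the framebuffer BO pinned in dm_plane_helper_prepare_fb(). */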
6403 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6404 struct drm_plane_state *old_state)
6406 struct amdgpu_bo *rbo;
6412 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6413 r = amdgpu_bo_reserve(rbo, false);
6415 DRM_ERROR("failed to reserve rbo before unpin\n");
6419 amdgpu_bo_unpin(rbo);
6420 amdgpu_bo_unreserve(rbo);
6421 amdgpu_bo_unref(&rbo);
6424 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6425 struct drm_crtc_state *new_crtc_state)
6427 int max_downscale = 0;
6428 int max_upscale = INT_MAX;
6430 /* TODO: These should be checked against DC plane caps */
6431 return drm_atomic_helper_check_plane_state(
6432 state, new_crtc_state, max_downscale, max_upscale, true, true);
6435 static int dm_plane_atomic_check(struct drm_plane *plane,
6436 struct drm_plane_state *state)
6438 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6439 struct dc *dc = adev->dm.dc;
6440 struct dm_plane_state *dm_plane_state;
6441 struct dc_scaling_info scaling_info;
6442 struct drm_crtc_state *new_crtc_state;
6445 trace_amdgpu_dm_plane_atomic_check(state);
6447 dm_plane_state = to_dm_plane_state(state);
6449 if (!dm_plane_state->dc_state)
6453 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6454 if (!new_crtc_state)
6457 ret = dm_plane_helper_check_state(state, new_crtc_state);
6461 ret = fill_dc_scaling_info(state, &scaling_info);
6465 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6471 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6472 struct drm_plane_state *new_plane_state)
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
	return -EINVAL;

return 0;
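/*
 * Async (cursor) update path: swap in the new fb, copy the src/crtc
 * rectangles into the current plane state and program the cursor directly.
 */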
6481 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6482 struct drm_plane_state *new_state)
6484 struct drm_plane_state *old_state =
6485 drm_atomic_get_old_plane_state(new_state->state, plane);
6487 trace_amdgpu_dm_atomic_update_cursor(new_state);
6489 swap(plane->state->fb, new_state->fb);
6491 plane->state->src_x = new_state->src_x;
6492 plane->state->src_y = new_state->src_y;
6493 plane->state->src_w = new_state->src_w;
6494 plane->state->src_h = new_state->src_h;
6495 plane->state->crtc_x = new_state->crtc_x;
6496 plane->state->crtc_y = new_state->crtc_y;
6497 plane->state->crtc_w = new_state->crtc_w;
6498 plane->state->crtc_h = new_state->crtc_h;
6500 handle_cursor_update(plane, old_state);
6503 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6504 .prepare_fb = dm_plane_helper_prepare_fb,
6505 .cleanup_fb = dm_plane_helper_cleanup_fb,
6506 .atomic_check = dm_plane_atomic_check,
6507 .atomic_async_check = dm_plane_atomic_async_check,
6508 .atomic_async_update = dm_plane_atomic_async_update
* TODO: These are currently initialized to RGB formats only.
* For future use cases we should either initialize them dynamically based on
* plane capabilities, or initialize this array to all formats, so the internal
* drm check will succeed, and let DC implement the proper check.
6517 static const uint32_t rgb_formats[] = {
6518 DRM_FORMAT_XRGB8888,
6519 DRM_FORMAT_ARGB8888,
6520 DRM_FORMAT_RGBA8888,
6521 DRM_FORMAT_XRGB2101010,
6522 DRM_FORMAT_XBGR2101010,
6523 DRM_FORMAT_ARGB2101010,
6524 DRM_FORMAT_ABGR2101010,
6525 DRM_FORMAT_XBGR8888,
6526 DRM_FORMAT_ABGR8888,
6530 static const uint32_t overlay_formats[] = {
6531 DRM_FORMAT_XRGB8888,
6532 DRM_FORMAT_ARGB8888,
6533 DRM_FORMAT_RGBA8888,
6534 DRM_FORMAT_XBGR8888,
6535 DRM_FORMAT_ABGR8888,
6539 static const u32 cursor_formats[] = {
6543 static int get_plane_formats(const struct drm_plane *plane,
6544 const struct dc_plane_cap *plane_cap,
6545 uint32_t *formats, int max_formats)
6547 int i, num_formats = 0;
6550 * TODO: Query support for each group of formats directly from
6551 * DC plane caps. This will require adding more formats to the
6555 switch (plane->type) {
6556 case DRM_PLANE_TYPE_PRIMARY:
6557 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6558 if (num_formats >= max_formats)
6561 formats[num_formats++] = rgb_formats[i];
6564 if (plane_cap && plane_cap->pixel_format_support.nv12)
6565 formats[num_formats++] = DRM_FORMAT_NV12;
6566 if (plane_cap && plane_cap->pixel_format_support.p010)
6567 formats[num_formats++] = DRM_FORMAT_P010;
6568 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6569 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6570 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6571 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6572 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6576 case DRM_PLANE_TYPE_OVERLAY:
6577 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6578 if (num_formats >= max_formats)
6581 formats[num_formats++] = overlay_formats[i];
6585 case DRM_PLANE_TYPE_CURSOR:
6586 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6587 if (num_formats >= max_formats)
6590 formats[num_formats++] = cursor_formats[i];
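/*
 * Initialize a DRM plane for the given DC plane caps: register the format
 * and modifier lists, then attach blending, color encoding/range and
 * rotation properties where the hardware supports them.
 */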
6598 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6599 struct drm_plane *plane,
6600 unsigned long possible_crtcs,
6601 const struct dc_plane_cap *plane_cap)
6603 uint32_t formats[32];
6606 unsigned int supported_rotations;
6607 uint64_t *modifiers = NULL;
6609 num_formats = get_plane_formats(plane, plane_cap, formats,
6610 ARRAY_SIZE(formats));
6612 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6616 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6617 &dm_plane_funcs, formats, num_formats,
6618 modifiers, plane->type, NULL);
6623 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6624 plane_cap && plane_cap->per_pixel_alpha) {
6625 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6626 BIT(DRM_MODE_BLEND_PREMULTI);
6628 drm_plane_create_alpha_property(plane);
6629 drm_plane_create_blend_mode_property(plane, blend_caps);
6632 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6634 (plane_cap->pixel_format_support.nv12 ||
6635 plane_cap->pixel_format_support.p010)) {
6636 /* This only affects YUV formats. */
6637 drm_plane_create_color_properties(
6639 BIT(DRM_COLOR_YCBCR_BT601) |
6640 BIT(DRM_COLOR_YCBCR_BT709) |
6641 BIT(DRM_COLOR_YCBCR_BT2020),
6642 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6643 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6644 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6647 supported_rotations =
6648 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6649 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6651 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6652 plane->type != DRM_PLANE_TYPE_CURSOR)
6653 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6654 supported_rotations);
6656 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6658 /* Create (reset) the plane state */
6659 if (plane->funcs->reset)
6660 plane->funcs->reset(plane);
6665 #ifdef CONFIG_DEBUG_FS
6666 static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
6667 struct amdgpu_crtc *acrtc)
6669 drm_object_attach_property(&acrtc->base.base,
6670 dm->crc_win_x_start_property,
6672 drm_object_attach_property(&acrtc->base.base,
6673 dm->crc_win_y_start_property,
6675 drm_object_attach_property(&acrtc->base.base,
6676 dm->crc_win_x_end_property,
6678 drm_object_attach_property(&acrtc->base.base,
6679 dm->crc_win_y_end_property,
6684 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6685 struct drm_plane *plane,
6686 uint32_t crtc_index)
6688 struct amdgpu_crtc *acrtc = NULL;
6689 struct drm_plane *cursor_plane;
6693 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6697 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6698 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6700 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6704 res = drm_crtc_init_with_planes(
6709 &amdgpu_dm_crtc_funcs, NULL);
6714 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6716 /* Create (reset) the plane state */
6717 if (acrtc->base.funcs->reset)
6718 acrtc->base.funcs->reset(&acrtc->base);
6720 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6721 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6723 acrtc->crtc_id = crtc_index;
6724 acrtc->base.enabled = false;
6725 acrtc->otg_inst = -1;
6727 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6728 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6729 true, MAX_COLOR_LUT_ENTRIES);
6730 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6731 #ifdef CONFIG_DEBUG_FS
6732 attach_crtc_crc_properties(dm, acrtc);
6738 kfree(cursor_plane);
6743 static int to_drm_connector_type(enum signal_type st)
6746 case SIGNAL_TYPE_HDMI_TYPE_A:
6747 return DRM_MODE_CONNECTOR_HDMIA;
6748 case SIGNAL_TYPE_EDP:
6749 return DRM_MODE_CONNECTOR_eDP;
6750 case SIGNAL_TYPE_LVDS:
6751 return DRM_MODE_CONNECTOR_LVDS;
6752 case SIGNAL_TYPE_RGB:
6753 return DRM_MODE_CONNECTOR_VGA;
6754 case SIGNAL_TYPE_DISPLAY_PORT:
6755 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6756 return DRM_MODE_CONNECTOR_DisplayPort;
6757 case SIGNAL_TYPE_DVI_DUAL_LINK:
6758 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6759 return DRM_MODE_CONNECTOR_DVID;
6760 case SIGNAL_TYPE_VIRTUAL:
6761 return DRM_MODE_CONNECTOR_VIRTUAL;
6764 return DRM_MODE_CONNECTOR_Unknown;
6768 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6770 struct drm_encoder *encoder;
6772 /* There is only one encoder per connector */
6773 drm_connector_for_each_possible_encoder(connector, encoder)
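/*
 * Cache the connector's preferred probed mode as the encoder's native
 * mode; common (scaled) modes are later derived from it.
 */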
6779 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6781 struct drm_encoder *encoder;
6782 struct amdgpu_encoder *amdgpu_encoder;
6784 encoder = amdgpu_dm_connector_to_encoder(connector);
6786 if (encoder == NULL)
6789 amdgpu_encoder = to_amdgpu_encoder(encoder);
6791 amdgpu_encoder->native_mode.clock = 0;
6793 if (!list_empty(&connector->probed_modes)) {
6794 struct drm_display_mode *preferred_mode = NULL;
6796 list_for_each_entry(preferred_mode,
6797 &connector->probed_modes,
6799 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6800 amdgpu_encoder->native_mode = *preferred_mode;
6808 static struct drm_display_mode *
6809 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6811 int hdisplay, int vdisplay)
6813 struct drm_device *dev = encoder->dev;
6814 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6815 struct drm_display_mode *mode = NULL;
6816 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6818 mode = drm_mode_duplicate(dev, native_mode);
6823 mode->hdisplay = hdisplay;
6824 mode->vdisplay = vdisplay;
6825 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6826 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6832 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6833 struct drm_connector *connector)
6835 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6836 struct drm_display_mode *mode = NULL;
6837 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6838 struct amdgpu_dm_connector *amdgpu_dm_connector =
6839 to_amdgpu_dm_connector(connector);
6843 char name[DRM_DISPLAY_MODE_LEN];
6846 } common_modes[] = {
6847 { "640x480", 640, 480},
6848 { "800x600", 800, 600},
6849 { "1024x768", 1024, 768},
6850 { "1280x720", 1280, 720},
6851 { "1280x800", 1280, 800},
6852 {"1280x1024", 1280, 1024},
6853 { "1440x900", 1440, 900},
6854 {"1680x1050", 1680, 1050},
6855 {"1600x1200", 1600, 1200},
6856 {"1920x1080", 1920, 1080},
6857 {"1920x1200", 1920, 1200}
6860 n = ARRAY_SIZE(common_modes);
6862 for (i = 0; i < n; i++) {
6863 struct drm_display_mode *curmode = NULL;
6864 bool mode_existed = false;
6866 if (common_modes[i].w > native_mode->hdisplay ||
6867 common_modes[i].h > native_mode->vdisplay ||
6868 (common_modes[i].w == native_mode->hdisplay &&
6869 common_modes[i].h == native_mode->vdisplay))
6872 list_for_each_entry(curmode, &connector->probed_modes, head) {
6873 if (common_modes[i].w == curmode->hdisplay &&
6874 common_modes[i].h == curmode->vdisplay) {
6875 mode_existed = true;
6883 mode = amdgpu_dm_create_common_mode(encoder,
6884 common_modes[i].name, common_modes[i].w,
6886 drm_mode_probed_add(connector, mode);
6887 amdgpu_dm_connector->num_modes++;
6891 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6894 struct amdgpu_dm_connector *amdgpu_dm_connector =
6895 to_amdgpu_dm_connector(connector);
6898 /* empty probed_modes */
6899 INIT_LIST_HEAD(&connector->probed_modes);
6900 amdgpu_dm_connector->num_modes =
6901 drm_add_edid_modes(connector, edid);
/* Sort the probed modes before calling
 * amdgpu_dm_get_native_mode(), since an EDID can have
 * more than one preferred mode. Modes that appear
 * later in the probed mode list may advertise a higher
 * preferred resolution. For example, a 3840x2160
 * preferred timing in the base EDID and a 4096x2160
 * preferred resolution in a DID extension block later.
6911 drm_mode_sort(&connector->probed_modes);
6912 amdgpu_dm_get_native_mode(connector);
6914 amdgpu_dm_connector->num_modes = 0;
6918 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6920 struct amdgpu_dm_connector *amdgpu_dm_connector =
6921 to_amdgpu_dm_connector(connector);
6922 struct drm_encoder *encoder;
6923 struct edid *edid = amdgpu_dm_connector->edid;
6925 encoder = amdgpu_dm_connector_to_encoder(connector);
6927 if (!drm_edid_is_valid(edid)) {
6928 amdgpu_dm_connector->num_modes =
6929 drm_add_modes_noedid(connector, 640, 480);
6931 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6932 amdgpu_dm_connector_add_common_modes(encoder, connector);
6934 amdgpu_dm_fbc_init(connector);
6936 return amdgpu_dm_connector->num_modes;
6939 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6940 struct amdgpu_dm_connector *aconnector,
6942 struct dc_link *link,
6945 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6948 * Some of the properties below require access to state, like bpc.
6949 * Allocate some default initial connector state with our reset helper.
6951 if (aconnector->base.funcs->reset)
6952 aconnector->base.funcs->reset(&aconnector->base);
6954 aconnector->connector_id = link_index;
6955 aconnector->dc_link = link;
6956 aconnector->base.interlace_allowed = false;
6957 aconnector->base.doublescan_allowed = false;
6958 aconnector->base.stereo_allowed = false;
6959 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6960 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6961 aconnector->audio_inst = -1;
6962 mutex_init(&aconnector->hpd_lock);
* Configure HPD hot-plug support: connector->polled defaults to 0,
* which means HPD hot plug is not supported.
6968 switch (connector_type) {
6969 case DRM_MODE_CONNECTOR_HDMIA:
6970 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6971 aconnector->base.ycbcr_420_allowed =
6972 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6974 case DRM_MODE_CONNECTOR_DisplayPort:
6975 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6976 aconnector->base.ycbcr_420_allowed =
6977 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6979 case DRM_MODE_CONNECTOR_DVID:
6980 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6986 drm_object_attach_property(&aconnector->base.base,
6987 dm->ddev->mode_config.scaling_mode_property,
6988 DRM_MODE_SCALE_NONE);
6990 drm_object_attach_property(&aconnector->base.base,
6991 adev->mode_info.underscan_property,
6993 drm_object_attach_property(&aconnector->base.base,
6994 adev->mode_info.underscan_hborder_property,
6996 drm_object_attach_property(&aconnector->base.base,
6997 adev->mode_info.underscan_vborder_property,
7000 if (!aconnector->mst_port)
7001 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7003 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7004 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7005 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7007 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7008 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7009 drm_object_attach_property(&aconnector->base.base,
7010 adev->mode_info.abm_level_property, 0);
7013 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7014 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7015 connector_type == DRM_MODE_CONNECTOR_eDP) {
7016 drm_object_attach_property(
7017 &aconnector->base.base,
7018 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7020 if (!aconnector->mst_port)
7021 drm_connector_attach_vrr_capable_property(&aconnector->base);
7023 #ifdef CONFIG_DRM_AMD_DC_HDCP
7024 if (adev->dm.hdcp_workqueue)
7025 drm_connector_attach_content_protection_property(&aconnector->base, true);
7030 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7031 struct i2c_msg *msgs, int num)
7033 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7034 struct ddc_service *ddc_service = i2c->ddc_service;
7035 struct i2c_command cmd;
7039 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7044 cmd.number_of_payloads = num;
7045 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7048 for (i = 0; i < num; i++) {
7049 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7050 cmd.payloads[i].address = msgs[i].addr;
7051 cmd.payloads[i].length = msgs[i].len;
7052 cmd.payloads[i].data = msgs[i].buf;
7056 ddc_service->ctx->dc,
7057 ddc_service->ddc_pin->hw_info.ddc_channel,
7061 kfree(cmd.payloads);
7065 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7067 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7070 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7071 .master_xfer = amdgpu_dm_i2c_xfer,
7072 .functionality = amdgpu_dm_i2c_func,
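/* Allocate an i2c adapter that routes transfers through the DC DDC service. */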
7075 static struct amdgpu_i2c_adapter *
7076 create_i2c(struct ddc_service *ddc_service,
7080 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7081 struct amdgpu_i2c_adapter *i2c;
7083 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7086 i2c->base.owner = THIS_MODULE;
7087 i2c->base.class = I2C_CLASS_DDC;
7088 i2c->base.dev.parent = &adev->pdev->dev;
7089 i2c->base.algo = &amdgpu_dm_i2c_algo;
7090 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7091 i2c_set_adapdata(&i2c->base, i2c);
7092 i2c->ddc_service = ddc_service;
7093 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7100 * Note: this function assumes that dc_link_detect() was called for the
7101 * dc_link which will be represented by this aconnector.
7103 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7104 struct amdgpu_dm_connector *aconnector,
7105 uint32_t link_index,
7106 struct amdgpu_encoder *aencoder)
7110 struct dc *dc = dm->dc;
7111 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7112 struct amdgpu_i2c_adapter *i2c;
7114 link->priv = aconnector;
7116 DRM_DEBUG_DRIVER("%s()\n", __func__);
7118 i2c = create_i2c(link->ddc, link->link_index, &res);
7120 DRM_ERROR("Failed to create i2c adapter data\n");
7124 aconnector->i2c = i2c;
7125 res = i2c_add_adapter(&i2c->base);
7128 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7132 connector_type = to_drm_connector_type(link->connector_signal);
7134 res = drm_connector_init_with_ddc(
7137 &amdgpu_dm_connector_funcs,
7142 DRM_ERROR("connector_init failed\n");
7143 aconnector->connector_id = -1;
7147 drm_connector_helper_add(
7149 &amdgpu_dm_connector_helper_funcs);
7151 amdgpu_dm_connector_init_helper(
7158 drm_connector_attach_encoder(
7159 &aconnector->base, &aencoder->base);
7161 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7162 || connector_type == DRM_MODE_CONNECTOR_eDP)
7163 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7168 aconnector->i2c = NULL;
7173 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7175 switch (adev->mode_info.num_crtc) {
7192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7193 struct amdgpu_encoder *aencoder,
7194 uint32_t link_index)
7196 struct amdgpu_device *adev = drm_to_adev(dev);
7198 int res = drm_encoder_init(dev,
7200 &amdgpu_dm_encoder_funcs,
7201 DRM_MODE_ENCODER_TMDS,
7204 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7207 aencoder->encoder_id = link_index;
7209 aencoder->encoder_id = -1;
7211 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
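/*
 * Enable or disable the CRTC's interrupt sources along with the DRM
 * vblank machinery (drm_crtc_vblank_on/off).
 */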
7216 static void manage_dm_interrupts(struct amdgpu_device *adev,
7217 struct amdgpu_crtc *acrtc,
7221 * We have no guarantee that the frontend index maps to the same
7222 * backend index - some even map to more than one.
7224 * TODO: Use a different interrupt or check DC itself for the mapping.
7227 amdgpu_display_crtc_idx_to_irq_type(
7232 drm_crtc_vblank_on(&acrtc->base);
7235 &adev->pageflip_irq,
7241 &adev->pageflip_irq,
7243 drm_crtc_vblank_off(&acrtc->base);
7247 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7248 struct amdgpu_crtc *acrtc)
7251 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
* This reads the current state for the IRQ and forcibly reapplies
* the setting to hardware.
7257 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7261 is_scaling_state_different(const struct dm_connector_state *dm_state,
7262 const struct dm_connector_state *old_dm_state)
7264 if (dm_state->scaling != old_dm_state->scaling)
7266 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7267 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7269 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7270 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7272 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7273 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7278 #ifdef CONFIG_DRM_AMD_DC_HDCP
7279 static bool is_content_protection_different(struct drm_connector_state *state,
7280 const struct drm_connector_state *old_state,
7281 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7283 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7284 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7286 /* Handle: Type0/1 change */
7287 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7288 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7289 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* CP is being re-enabled, ignore this.
7295 * Handles: ENABLED -> DESIRED
7297 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7298 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7299 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7303 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7305 * Handles: UNDESIRED -> ENABLED
7307 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7308 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7309 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* Check if something is connected or enabled; otherwise we would start HDCP
 * with nothing connected/enabled (hot-plug, headless S3, dpms).
 *
 * Handles: DESIRED -> DESIRED (Special case)
7316 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7317 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7318 dm_con_state->update_hdcp = false;
7323 * Handles: UNDESIRED -> UNDESIRED
7324 * DESIRED -> DESIRED
7325 * ENABLED -> ENABLED
7327 if (old_state->content_protection == state->content_protection)
7331 * Handles: UNDESIRED -> DESIRED
7332 * DESIRED -> UNDESIRED
7333 * ENABLED -> UNDESIRED
7335 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7339 * Handles: DESIRED -> ENABLED
7345 static void remove_stream(struct amdgpu_device *adev,
7346 struct amdgpu_crtc *acrtc,
7347 struct dc_stream_state *stream)
7349 /* this is the update mode case */
7351 acrtc->otg_inst = -1;
7352 acrtc->enabled = false;
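/*
 * Translate the cursor plane's DRM state into a DC cursor position,
 * clamping the hotspot when the cursor hangs past the top/left edge.
 */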
7355 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7356 struct dc_cursor_position *position)
7358 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7360 int xorigin = 0, yorigin = 0;
7362 position->enable = false;
7366 if (!crtc || !plane->state->fb)
7369 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7370 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7371 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7373 plane->state->crtc_w,
7374 plane->state->crtc_h);
7378 x = plane->state->crtc_x;
7379 y = plane->state->crtc_y;
7381 if (x <= -amdgpu_crtc->max_cursor_width ||
7382 y <= -amdgpu_crtc->max_cursor_height)
7386 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7390 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7393 position->enable = true;
7394 position->translate_by_source = true;
7397 position->x_hotspot = xorigin;
7398 position->y_hotspot = yorigin;
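/*
 * Program (or disable) the hardware cursor for the CRTC from the cursor
 * plane's current state, under the DC lock.
 */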
7403 static void handle_cursor_update(struct drm_plane *plane,
7404 struct drm_plane_state *old_plane_state)
7406 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7407 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7408 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7409 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7410 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7411 uint64_t address = afb ? afb->address : 0;
7412 struct dc_cursor_position position;
7413 struct dc_cursor_attributes attributes;
7416 if (!plane->state->fb && !old_plane_state->fb)
7419 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7421 amdgpu_crtc->crtc_id,
7422 plane->state->crtc_w,
7423 plane->state->crtc_h);
7425 ret = get_cursor_position(plane, crtc, &position);
7429 if (!position.enable) {
7430 /* turn off cursor */
7431 if (crtc_state && crtc_state->stream) {
7432 mutex_lock(&adev->dm.dc_lock);
7433 dc_stream_set_cursor_position(crtc_state->stream,
7435 mutex_unlock(&adev->dm.dc_lock);
7440 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7441 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7443 memset(&attributes, 0, sizeof(attributes));
7444 attributes.address.high_part = upper_32_bits(address);
7445 attributes.address.low_part = lower_32_bits(address);
7446 attributes.width = plane->state->crtc_w;
7447 attributes.height = plane->state->crtc_h;
7448 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7449 attributes.rotation_angle = 0;
7450 attributes.attribute_flags.value = 0;
7452 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7454 if (crtc_state->stream) {
7455 mutex_lock(&adev->dm.dc_lock);
7456 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7458 DRM_ERROR("DC failed to set cursor attributes\n");
7460 if (!dc_stream_set_cursor_position(crtc_state->stream,
7462 DRM_ERROR("DC failed to set cursor position\n");
7463 mutex_unlock(&adev->dm.dc_lock);
7467 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7470 assert_spin_locked(&acrtc->base.dev->event_lock);
7471 WARN_ON(acrtc->event);
7473 acrtc->event = acrtc->base.state->event;
7475 /* Set the flip status */
7476 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7478 /* Mark this event as consumed */
7479 acrtc->base.state->event = NULL;
7481 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7485 static void update_freesync_state_on_stream(
7486 struct amdgpu_display_manager *dm,
7487 struct dm_crtc_state *new_crtc_state,
7488 struct dc_stream_state *new_stream,
7489 struct dc_plane_state *surface,
7490 u32 flip_timestamp_in_us)
7492 struct mod_vrr_params vrr_params;
7493 struct dc_info_packet vrr_infopacket = {0};
7494 struct amdgpu_device *adev = dm->adev;
7495 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7496 unsigned long flags;
7502 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7503 * For now it's sufficient to just guard against these conditions.
7506 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7509 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7510 vrr_params = acrtc->dm_irq_params.vrr_params;
7513 mod_freesync_handle_preflip(
7514 dm->freesync_module,
7517 flip_timestamp_in_us,
7520 if (adev->family < AMDGPU_FAMILY_AI &&
7521 amdgpu_dm_vrr_active(new_crtc_state)) {
7522 mod_freesync_handle_v_update(dm->freesync_module,
7523 new_stream, &vrr_params);
7525 /* Need to call this before the frame ends. */
7526 dc_stream_adjust_vmin_vmax(dm->dc,
7527 new_crtc_state->stream,
7528 &vrr_params.adjust);
7532 mod_freesync_build_vrr_infopacket(
7533 dm->freesync_module,
7537 TRANSFER_FUNC_UNKNOWN,
7540 new_crtc_state->freesync_timing_changed |=
7541 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7543 sizeof(vrr_params.adjust)) != 0);
7545 new_crtc_state->freesync_vrr_info_changed |=
7546 (memcmp(&new_crtc_state->vrr_infopacket,
7548 sizeof(vrr_infopacket)) != 0);
7550 acrtc->dm_irq_params.vrr_params = vrr_params;
7551 new_crtc_state->vrr_infopacket = vrr_infopacket;
7553 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7554 new_stream->vrr_infopacket = vrr_infopacket;
7556 if (new_crtc_state->freesync_vrr_info_changed)
7557 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7558 new_crtc_state->base.crtc->base.id,
7559 (int)new_crtc_state->base.vrr_enabled,
7560 (int)vrr_params.state);
7562 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7565 static void update_stream_irq_parameters(
7566 struct amdgpu_display_manager *dm,
7567 struct dm_crtc_state *new_crtc_state)
7569 struct dc_stream_state *new_stream = new_crtc_state->stream;
7570 struct mod_vrr_params vrr_params;
7571 struct mod_freesync_config config = new_crtc_state->freesync_config;
7572 struct amdgpu_device *adev = dm->adev;
7573 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7574 unsigned long flags;
7580 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7581 * For now it's sufficient to just guard against these conditions.
7583 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7586 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7587 vrr_params = acrtc->dm_irq_params.vrr_params;
7589 if (new_crtc_state->vrr_supported &&
7590 config.min_refresh_in_uhz &&
7591 config.max_refresh_in_uhz) {
7592 config.state = new_crtc_state->base.vrr_enabled ?
7593 VRR_STATE_ACTIVE_VARIABLE :
7596 config.state = VRR_STATE_UNSUPPORTED;
7599 mod_freesync_build_vrr_params(dm->freesync_module,
7601 &config, &vrr_params);
7603 new_crtc_state->freesync_timing_changed |=
7604 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7605 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7607 new_crtc_state->freesync_config = config;
7608 /* Copy state for access from DM IRQ handler */
7609 acrtc->dm_irq_params.freesync_config = config;
7610 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7611 acrtc->dm_irq_params.vrr_params = vrr_params;
7612 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7615 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7616 struct dm_crtc_state *new_state)
7618 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7619 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7621 if (!old_vrr_active && new_vrr_active) {
7622 /* Transition VRR inactive -> active:
7623 * While VRR is active, we must not disable vblank irq, as a
* reenable after disable would compute bogus vblank/pflip
* timestamps if the reenable happened inside the display front porch.
*
* We also need the vupdate irq for the actual core vblank handling
* at the end of vblank.
*/
7630 dm_set_vupdate_irq(new_state->base.crtc, true);
7631 drm_crtc_vblank_get(new_state->base.crtc);
7632 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7633 __func__, new_state->base.crtc->base.id);
7634 } else if (old_vrr_active && !new_vrr_active) {
7635 /* Transition VRR active -> inactive:
7636 * Allow vblank irq disable again for fixed refresh rate.
7638 dm_set_vupdate_irq(new_state->base.crtc, false);
7639 drm_crtc_vblank_put(new_state->base.crtc);
7640 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7641 __func__, new_state->base.crtc->base.id);
7645 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7647 struct drm_plane *plane;
7648 struct drm_plane_state *old_plane_state, *new_plane_state;
7652 * TODO: Make this per-stream so we don't issue redundant updates for
7653 * commits with multiple streams.
7655 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7657 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7658 handle_cursor_update(plane, old_plane_state);
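/*
 * Build a bundle of DC surface and stream updates for the planes on this
 * CRTC, throttle the flip against the target vblank, and commit the bundle
 * to DC under the dc_lock, updating PSR and IRQ state as needed.
 */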
7661 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7662 struct dc_state *dc_state,
7663 struct drm_device *dev,
7664 struct amdgpu_display_manager *dm,
7665 struct drm_crtc *pcrtc,
7666 bool wait_for_vblank)
7669 uint64_t timestamp_ns;
7670 struct drm_plane *plane;
7671 struct drm_plane_state *old_plane_state, *new_plane_state;
7672 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7673 struct drm_crtc_state *new_pcrtc_state =
7674 drm_atomic_get_new_crtc_state(state, pcrtc);
7675 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7676 struct dm_crtc_state *dm_old_crtc_state =
7677 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7678 int planes_count = 0, vpos, hpos;
7680 unsigned long flags;
7681 struct amdgpu_bo *abo;
7682 uint32_t target_vblank, last_flip_vblank;
7683 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7684 bool pflip_present = false;
7686 struct dc_surface_update surface_updates[MAX_SURFACES];
7687 struct dc_plane_info plane_infos[MAX_SURFACES];
7688 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7689 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7690 struct dc_stream_update stream_update;
7693 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7696 dm_error("Failed to allocate update bundle\n");
7701 * Disable the cursor first if we're disabling all the planes.
* It'll remain on the screen after the planes are re-enabled
* if we don't.
7705 if (acrtc_state->active_planes == 0)
7706 amdgpu_dm_commit_cursors(state);
7708 /* update planes when needed */
7709 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7710 struct drm_crtc *crtc = new_plane_state->crtc;
7711 struct drm_crtc_state *new_crtc_state;
7712 struct drm_framebuffer *fb = new_plane_state->fb;
7713 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7714 bool plane_needs_flip;
7715 struct dc_plane_state *dc_plane;
7716 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7718 /* Cursor plane is handled after stream updates */
7719 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7722 if (!fb || !crtc || pcrtc != crtc)
7725 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7726 if (!new_crtc_state->active)
7729 dc_plane = dm_new_plane_state->dc_state;
7731 bundle->surface_updates[planes_count].surface = dc_plane;
7732 if (new_pcrtc_state->color_mgmt_changed) {
7733 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7734 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7735 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7738 fill_dc_scaling_info(new_plane_state,
7739 &bundle->scaling_infos[planes_count]);
7741 bundle->surface_updates[planes_count].scaling_info =
7742 &bundle->scaling_infos[planes_count];
7744 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7746 pflip_present = pflip_present || plane_needs_flip;
7748 if (!plane_needs_flip) {
7753 abo = gem_to_amdgpu_bo(fb->obj[0]);
* Wait for all fences on this FB. Use a limited wait to avoid
* a deadlock during GPU reset, when this fence will not signal
* but we still hold the reservation lock for the BO.
7760 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7762 msecs_to_jiffies(5000));
7763 if (unlikely(r <= 0))
7764 DRM_ERROR("Waiting for fences timed out!");
7766 fill_dc_plane_info_and_addr(
7767 dm->adev, new_plane_state,
7769 &bundle->plane_infos[planes_count],
7770 &bundle->flip_addrs[planes_count].address,
7771 afb->tmz_surface, false);
7773 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7774 new_plane_state->plane->index,
7775 bundle->plane_infos[planes_count].dcc.enable);
7777 bundle->surface_updates[planes_count].plane_info =
7778 &bundle->plane_infos[planes_count];
7781 * Only allow immediate flips for fast updates that don't
* change FB pitch, DCC state, rotation or mirroring.
7784 bundle->flip_addrs[planes_count].flip_immediate =
7785 crtc->state->async_flip &&
7786 acrtc_state->update_type == UPDATE_TYPE_FAST;
7788 timestamp_ns = ktime_get_ns();
7789 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7790 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7791 bundle->surface_updates[planes_count].surface = dc_plane;
7793 if (!bundle->surface_updates[planes_count].surface) {
7794 DRM_ERROR("No surface for CRTC: id=%d\n",
7795 acrtc_attach->crtc_id);
7799 if (plane == pcrtc->primary)
7800 update_freesync_state_on_stream(
7803 acrtc_state->stream,
7805 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7807 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7809 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7810 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7816 if (pflip_present) {
7818 /* Use old throttling in non-vrr fixed refresh rate mode
7819 * to keep flip scheduling based on target vblank counts
7820 * working in a backwards compatible way, e.g., for
7821 * clients using the GLX_OML_sync_control extension or
7822 * DRI3/Present extension with defined target_msc.
7824 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7827 /* For variable refresh rate mode only:
7828 * Get vblank of last completed flip to avoid > 1 vrr
7829 * flips per video frame by use of throttling, but allow
7830 * flip programming anywhere in the possibly large
7831 * variable vrr vblank interval for fine-grained flip
7832 * timing control and more opportunity to avoid stutter
7833 * on late submission of flips.
7835 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7836 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7837 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7840 target_vblank = last_flip_vblank + wait_for_vblank;
7843 * Wait until we're out of the vertical blank period before the one
7844 * targeted by the flip
7846 while ((acrtc_attach->enabled &&
7847 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7848 0, &vpos, &hpos, NULL,
7849 NULL, &pcrtc->hwmode)
7850 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7851 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7852 (int)(target_vblank -
7853 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7854 usleep_range(1000, 1100);
7858 * Prepare the flip event for the pageflip interrupt to handle.
7860 * This only works in the case where we've already turned on the
* appropriate hardware blocks (e.g. HUBP), so in the transition case
7862 * from 0 -> n planes we have to skip a hardware generated event
7863 * and rely on sending it from software.
7865 if (acrtc_attach->base.state->event &&
7866 acrtc_state->active_planes > 0) {
7867 drm_crtc_vblank_get(pcrtc);
7869 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7871 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7872 prepare_flip_isr(acrtc_attach);
7874 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7877 if (acrtc_state->stream) {
7878 if (acrtc_state->freesync_vrr_info_changed)
7879 bundle->stream_update.vrr_infopacket =
7880 &acrtc_state->stream->vrr_infopacket;
7884 /* Update the planes if changed or disable if we don't have any. */
7885 if ((planes_count || acrtc_state->active_planes == 0) &&
7886 acrtc_state->stream) {
7887 bundle->stream_update.stream = acrtc_state->stream;
7888 if (new_pcrtc_state->mode_changed) {
7889 bundle->stream_update.src = acrtc_state->stream->src;
7890 bundle->stream_update.dst = acrtc_state->stream->dst;
7893 if (new_pcrtc_state->color_mgmt_changed) {
7895 * TODO: This isn't fully correct since we've actually
7896 * already modified the stream in place.
7898 bundle->stream_update.gamut_remap =
7899 &acrtc_state->stream->gamut_remap_matrix;
7900 bundle->stream_update.output_csc_transform =
7901 &acrtc_state->stream->csc_color_matrix;
7902 bundle->stream_update.out_transfer_func =
7903 acrtc_state->stream->out_transfer_func;
7906 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7907 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7908 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7911 * If FreeSync state on the stream has changed then we need to
7912 * re-adjust the min/max bounds now that DC doesn't handle this
7913 * as part of commit.
7915 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7916 amdgpu_dm_vrr_active(acrtc_state)) {
7917 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7918 dc_stream_adjust_vmin_vmax(
7919 dm->dc, acrtc_state->stream,
7920 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7921 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7923 mutex_lock(&dm->dc_lock);
7924 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7925 acrtc_state->stream->link->psr_settings.psr_allow_active)
7926 amdgpu_dm_psr_disable(acrtc_state->stream);
7928 dc_commit_updates_for_stream(dm->dc,
7929 bundle->surface_updates,
7931 acrtc_state->stream,
7932 &bundle->stream_update,
* Enable or disable the interrupts on the backend.
*
* Most pipes are put into power gating when unused.
*
* When a pipe is power gated we lose its interrupt enablement
* state, so it must be restored once power gating is disabled.
*
* So we need to update the IRQ control state in hardware
* whenever the pipe turns on (since it could previously have been
* power gated) or off (since some pipes can't be power gated
7948 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7949 dm_update_pflip_irq_state(drm_to_adev(dev),
7952 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7953 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7954 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7955 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7956 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7957 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7958 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7959 amdgpu_dm_psr_enable(acrtc_state->stream);
7962 mutex_unlock(&dm->dc_lock);
7966 * Update cursor state *after* programming all the planes.
7967 * This avoids redundant programming in the case where we're going
7968 * to be disabling a single plane - those pipes are being disabled.
7970 if (acrtc_state->active_planes)
7971 amdgpu_dm_commit_cursors(state);
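/*
 * Notify the audio component about ELD changes: removals first for
 * connectors whose CRTC routing changed, then additions for streams that
 * now carry audio.
 */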
7977 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7978 struct drm_atomic_state *state)
7980 struct amdgpu_device *adev = drm_to_adev(dev);
7981 struct amdgpu_dm_connector *aconnector;
7982 struct drm_connector *connector;
7983 struct drm_connector_state *old_con_state, *new_con_state;
7984 struct drm_crtc_state *new_crtc_state;
7985 struct dm_crtc_state *new_dm_crtc_state;
7986 const struct dc_stream_status *status;
7989 /* Notify device removals. */
7990 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7991 if (old_con_state->crtc != new_con_state->crtc) {
7992 /* CRTC changes require notification. */
7996 if (!new_con_state->crtc)
7999 new_crtc_state = drm_atomic_get_new_crtc_state(
8000 state, new_con_state->crtc);
8002 if (!new_crtc_state)
8005 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8009 aconnector = to_amdgpu_dm_connector(connector);
8011 mutex_lock(&adev->dm.audio_lock);
8012 inst = aconnector->audio_inst;
8013 aconnector->audio_inst = -1;
8014 mutex_unlock(&adev->dm.audio_lock);
8016 amdgpu_dm_audio_eld_notify(adev, inst);
8019 /* Notify audio device additions. */
8020 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8021 if (!new_con_state->crtc)
8024 new_crtc_state = drm_atomic_get_new_crtc_state(
8025 state, new_con_state->crtc);
8027 if (!new_crtc_state)
8030 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8033 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8034 if (!new_dm_crtc_state->stream)
8037 status = dc_stream_get_status(new_dm_crtc_state->stream);
8041 aconnector = to_amdgpu_dm_connector(connector);
8043 mutex_lock(&adev->dm.audio_lock);
8044 inst = status->audio_inst;
8045 aconnector->audio_inst = inst;
8046 mutex_unlock(&adev->dm.audio_lock);
8048 amdgpu_dm_audio_eld_notify(adev, inst);
8053 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8054 * @crtc_state: the DRM CRTC state
8055 * @stream_state: the DC stream state.
8057 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8058 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8060 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8061 struct dc_stream_state *stream_state)
8063 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
* amdgpu_dm_atomic_commit_tail() - AMDGPU DM's commit tail implementation.
8068 * @state: The atomic state to commit
8070 * This will tell DC to commit the constructed DC state from atomic_check,
* programming the hardware. Any failure here implies a hardware failure, since
8072 * atomic check should have filtered anything non-kosher.
8074 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8076 struct drm_device *dev = state->dev;
8077 struct amdgpu_device *adev = drm_to_adev(dev);
8078 struct amdgpu_display_manager *dm = &adev->dm;
8079 struct dm_atomic_state *dm_state;
8080 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8082 struct drm_crtc *crtc;
8083 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8084 unsigned long flags;
8085 bool wait_for_vblank = true;
8086 struct drm_connector *connector;
8087 struct drm_connector_state *old_con_state, *new_con_state;
8088 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8089 int crtc_disable_count = 0;
8090 bool mode_set_reset_required = false;
8092 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8094 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8096 dm_state = dm_atomic_get_new_state(state);
8097 if (dm_state && dm_state->context) {
8098 dc_state = dm_state->context;
8100 /* No state changes, retain current state. */
8101 dc_state_temp = dc_create_state(dm->dc);
8102 ASSERT(dc_state_temp);
8103 dc_state = dc_state_temp;
8104 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8107 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8108 new_crtc_state, i) {
8109 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8111 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8113 if (old_crtc_state->active &&
8114 (!new_crtc_state->active ||
8115 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8116 manage_dm_interrupts(adev, acrtc, false);
8117 dc_stream_release(dm_old_crtc_state->stream);
8121 drm_atomic_helper_calc_timestamping_constants(state);
8123 /* update changed items */
8124 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8125 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8127 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8128 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8131 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8132 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8133 "connectors_changed:%d\n",
8135 new_crtc_state->enable,
8136 new_crtc_state->active,
8137 new_crtc_state->planes_changed,
8138 new_crtc_state->mode_changed,
8139 new_crtc_state->active_changed,
8140 new_crtc_state->connectors_changed);
8142 /* Disable cursor if disabling crtc */
8143 if (old_crtc_state->active && !new_crtc_state->active) {
8144 struct dc_cursor_position position;
8146 memset(&position, 0, sizeof(position));
8147 mutex_lock(&dm->dc_lock);
8148 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8149 mutex_unlock(&dm->dc_lock);
8152 /* Copy all transient state flags into dc state */
8153 if (dm_new_crtc_state->stream) {
8154 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8155 dm_new_crtc_state->stream);
8158 /* handles headless hotplug case, updating new_state and
8159 * aconnector as needed
8162 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8164 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8166 if (!dm_new_crtc_state->stream) {
/*
 * This can happen due to issues with the delivery of
 * userspace notifications: userspace tries to set a
 * mode on a display that is in fact disconnected, so
 * dc_sink is NULL on the aconnector and we expect a
 * mode reset to come soon.
 *
 * It can also happen when an unplug is done during
 * the resume sequence.
 *
 * In either case, we want to pretend we still have a
 * sink to keep the pipe running, so that the hw state
 * stays consistent with the sw state.
 */
8182 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8183 __func__, acrtc->base.base.id);
8187 if (dm_old_crtc_state->stream)
8188 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8190 pm_runtime_get_noresume(dev->dev);
8192 acrtc->enabled = true;
8193 acrtc->hw_mode = new_crtc_state->mode;
8194 crtc->hwmode = new_crtc_state->mode;
8195 mode_set_reset_required = true;
8196 } else if (modereset_required(new_crtc_state)) {
8197 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8198 /* i.e. reset mode */
8199 if (dm_old_crtc_state->stream)
8200 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8201 mode_set_reset_required = true;
8203 } /* for_each_crtc_in_state() */
/* If there is a mode set or reset, disable eDP PSR. */
8207 if (mode_set_reset_required)
8208 amdgpu_dm_psr_disable_all(dm);
8210 dm_enable_per_frame_crtc_master_sync(dc_state);
8211 mutex_lock(&dm->dc_lock);
8212 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8213 mutex_unlock(&dm->dc_lock);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}

#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		bool configure_crc = false;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
			if (new_crtc_state->active &&
			    amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
				/*
				 * Frontend may have changed, so reapply the CRC capture
				 * settings for the stream.
				 */
				dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
				dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

				if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
					if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
						configure_crc = true;
				} else {
					if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
						configure_crc = true;
				}

				if (configure_crc)
					amdgpu_dm_crtc_configure_crc_source(
						crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
			}
#endif
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* Update planes when needed, per CRTC */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * Send a vblank event on all events not handled in flip and
	 * mark the consumed event for drm_atomic_helper_commit_hw_done.
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* Return the stolen VGA memory back to VRAM */
	if (!adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore.
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}

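/*
 * Build and commit a minimal atomic state that restores the current mode on
 * @connector's CRTC. Used by dm_restore_drm_connector_state() below when
 * userspace cannot be relied upon to re-set the mode after a hotplug.
 */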
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore the previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto out;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto out;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto out;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);

out:
	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	/*
	 * Drop the reference taken by drm_atomic_state_alloc() on both the
	 * success and the error path, so the state is not leaked.
	 */
	drm_atomic_state_put(state);

	return ret;
}

/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without a usermode desktop manager.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on a usermode call
	 * to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are locking here will also get released.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done.
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}

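/*
 * Derive the FreeSync/VRR configuration for a CRTC from the connector state.
 * The freesync module takes refresh limits in uHz, hence the * 1000000
 * scaling below; e.g. a 48-144 Hz capable panel is reported as
 * min_refresh_in_uhz = 48000000 and max_refresh_in_uhz = 144000000.
 */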
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

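/*
 * Create or remove the DC stream that backs a CRTC. amdgpu_dm_atomic_check()
 * calls this twice for every CRTC: a first pass with enable == false that
 * removes the streams of disabled/changed CRTCs from the DC context, then a
 * second pass with enable == true that creates and validates the new streams.
 */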
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO: This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when
		 * added MST connectors are not found in the existing
		 * crtc_state in chained mode.
		 * TODO: need to dig out the root cause of that.
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release the extra reference taken at creation */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

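/*
 * Decide whether a plane update requires DC to remove and recreate the plane
 * ("reset" it). Anything that can affect z-ordering, pipe acquisition or
 * bandwidth falls into this category; a pure position change does not.
 */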
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on the CRTC.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_afb->tiling_flags != new_afb->tiling_flags ||
		    old_afb->base.modifier != new_afb->base.modifier)
			return true;
	}

	return false;
}

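/*
 * Validate a framebuffer that is about to be bound to the hardware cursor
 * plane. As a worked example of the pitch rule below: a 64x64 ARGB8888
 * cursor has pitches[0] = 256 bytes and cpp = 4, i.e. a pitch of 64 pixels,
 * which matches fb->width and is one of the supported pitches (64/128/256).
 */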
static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
			      struct drm_plane_state *new_plane_state,
			      struct drm_framebuffer *fb)
{
	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	unsigned int pitch;
	bool linear;

	if (fb->width > new_acrtc->max_cursor_width ||
	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 new_plane_state->fb->width,
				 new_plane_state->fb->height);
		return -EINVAL;
	}
	if (new_plane_state->src_w != fb->width << 16 ||
	    new_plane_state->src_h != fb->height << 16) {
		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
		return -EINVAL;
	}

	/* Pitch in pixels */
	pitch = fb->pitches[0] / fb->format->cpp[0];

	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
				 fb->width, pitch);
		return -EINVAL;
	}

	switch (pitch) {
	case 64:
	case 128:
	case 256:
		/* FB pitch is supported by the cursor plane */
		break;
	default:
		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
		return -EINVAL;
	}

	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
		} else {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
		}
		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear");
			return -EINVAL;
		}
	}

	return 0;
}

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

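/*
 * DCE/DCN has no independent cursor pipe: the cursor inherits scaling from
 * the pipe it rides on. The check below therefore compares per-mille scale
 * factors (crtc size * 1000 / source size) of the cursor and primary planes
 * and rejects the commit when they differ.
 */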
static int dm_check_crtc_cursor(struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_plane_state *new_cursor_state, *new_primary_state;
	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;

	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check the cursor plane's
	 * blending properties match the primary plane's.
	 */
	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb)
		return 0;

	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
			 (new_cursor_state->src_w >> 16);
	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
			 (new_cursor_state->src_h >> 16);

	primary_scale_w = new_primary_state->crtc_w * 1000 /
			  (new_primary_state->src_w >> 16);
	primary_scale_h = new_primary_state->crtc_h * 1000 /
			  (new_primary_state->src_h >> 16);
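
	/*
	 * Example: a 64x64 cursor shown at 64x64 scores 1000 on each axis,
	 * while a 1920x1080 primary upscaled to 3840x2160 scores 2000, so
	 * such a combination is rejected below.
	 */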
	if (cursor_scale_w != primary_scale_w ||
	    cursor_scale_h != primary_scale_h) {
		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
		return -EINVAL;
	}

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For the full-update case, which removes/adds/updates streams on
 * one CRTC while flipping on another, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	struct dm_crtc_state *dm_old_crtc_state;

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}
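
	/*
	 * Note the ordering of the four passes above: every removal pass
	 * (planes, then CRTC streams) runs before the corresponding
	 * enable/add pass, so the DC context is never left holding a mix of
	 * stale and new resources while it is being rebuilt.
	 */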
	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	/* Check cursor planes scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret)
			goto fail;
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scale or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs-1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}
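
	/*
	 * Example of the swap-removal above: with three private objects and
	 * the DM object at index 0, the entry at index 2 is copied into
	 * slot 0, slot 2 is cleared, and num_private_objs drops to 2.
	 */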
	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
							 UPDATE_TYPE_FULL :
							 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
	}

	return capable;
}

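/**
 * amdgpu_dm_update_freesync_caps - Update a connector's FreeSync capability
 * @connector: DRM connector to update
 * @edid: parsed EDID of the attached sink, or NULL on disconnect
 *
 * Scans the EDID's detailed timing blocks for a monitor range descriptor and
 * caches the supported vertical refresh range on the amdgpu_dm connector,
 * then reflects the result into the DRM "vrr_capable" property.
 */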
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * The EDID is non-NULL here, so restrict freesync to DP and eDP only.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
	    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize with a fail-safe of 2 frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
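
	/*
	 * For example, at 60 Hz each frame takes 16666 us, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames; at 144 Hz
	 * (6944 us per frame) it becomes 30000 / 6944 + 1 = 5 frames.
	 */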
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}

/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);