/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
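
/*
 * Map the dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed to userspace through the DisplayPort
 * "subconnector" property.
 */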
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		/* Pack into register format: vertical component in the low
		 * 16 bits, horizontal (resp. vblank end) in the high 16 bits.
		 */
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
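
/*
 * FreeSync/VRR is considered active when the freesync state is either
 * VRR_STATE_ACTIVE_VARIABLE or VRR_STATE_ACTIVE_FIXED. The _irq variant
 * reads the copy kept in dm_irq_params, which is safe to use from
 * interrupt handlers.
 */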
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int)!e);
}
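
/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * In VRR mode core vblank handling is deferred to this handler, which runs
 * after the end of the front-porch; pre-DCE12 ASICs additionally run their
 * BTR processing here. See the comments in the body.
 */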
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	/* Size the buffer for the largest mode the panel exposes,
	 * at 4 bytes per pixel.
	 */
	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
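
/*
 * Hardware-side DMUB bring-up: copy the instruction constants, BSS/data and
 * VBIOS regions of the firmware into the framebuffer windows reserved for
 * the DMUB service, clear the mailbox, tracebuffer and firmware-state
 * windows, program the hardware parameters and wait for the firmware
 * auto-load to finish. Returns 0 on success, and also when DMUB is simply
 * not supported on the ASIC.
 */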
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
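
/*
 * Fill a dc_phy_addr_space_config from the GMC view of memory: the system
 * aperture (framebuffer plus AGP range) and the GART page table location.
 * The result is handed to dc_setup_system_context() during init so DC can
 * program the DCN address space accordingly.
 */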
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (by 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else
		dm->active_vblank_irq_count--;

	/* MALL/stutter idle optimizations are only allowed while no CRTC
	 * has an active vblank interrupt.
	 */
	dc_allow_idle_optimizations(
		dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
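
/*
 * amdgpu_dm_init() creates the DM (and DC) instance for the device: it
 * initializes the IRQ service, creates the Display Core with the asic id
 * and feature flags gathered below, brings up DMUB, creates the freesync
 * and HDCP modules, and then builds the DRM side (planes, CRTCs,
 * connectors) via amdgpu_dm_initialize_drm_device().
 */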
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;

error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
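
/*
 * Request and validate the DMCU microcode for the ASICs that need it. Most
 * ASICs either have no DMCU or get their firmware some other way and simply
 * return 0; only Raven (Picasso/Raven2) and Navi12 load an image here, and
 * a missing image is deliberately not treated as an error.
 */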
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
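
/*
 * Software-side DMUB setup: pick the firmware image for the ASIC, request
 * and validate it, create the DMUB service, size its firmware regions, and
 * allocate the framebuffer memory that dm_dmub_hw_init() later populates.
 */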
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}
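
/*
 * Walk all connectors and (re)start MST topology management on every link
 * that is an MST branch with a usable AUX channel; a link that fails to
 * start is downgraded to a single-stream connection.
 */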
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
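
/*
 * Suspend/resume helper for MST: on suspend the topology managers are
 * suspended; on resume they are restarted, and any manager that cannot be
 * resumed gets its MST mode turned off, after which a hotplug event is
 * sent so userspace rescans the (now single-stream) connector.
 */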
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
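
/*
 * Commit an empty dc_state to DC: copy the current state, strip all planes
 * and streams from it, validate, and commit. Used around GPU reset so the
 * hardware is brought to a known stream-less state.
 */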
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		/* dc_lock stays held across the reset; dm_resume() releases it. */
		return ret;
	}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
	amdgpu_dm_crtc_secure_display_suspend(adev);
#endif
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
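
/*
 * Emulate link detection for a connector that is forced on while no sink
 * is physically attached: synthesize a dc_sink matching the connector's
 * signal type and try to read a (forced) EDID for it.
 */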
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}
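
/*
 * Re-commit every surface of the given (cached, pre-reset) dc_state with
 * force_full_update set, so all planes get fully reprogrammed after a GPU
 * reset.
 */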
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);
}
static void dm_set_dpms_off(struct dc_link *link)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}
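
/*
 * dm_resume() has two paths: after a GPU reset it re-commits the cached
 * dc_state saved by dm_suspend(); otherwise (S3 resume) it rebuilds the dc
 * state, re-runs link detection, and replays the cached atomic state.
 */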
2092 static int dm_resume(void *handle)
2094 struct amdgpu_device *adev = handle;
2095 struct drm_device *ddev = adev_to_drm(adev);
2096 struct amdgpu_display_manager *dm = &adev->dm;
2097 struct amdgpu_dm_connector *aconnector;
2098 struct drm_connector *connector;
2099 struct drm_connector_list_iter iter;
2100 struct drm_crtc *crtc;
2101 struct drm_crtc_state *new_crtc_state;
2102 struct dm_crtc_state *dm_new_crtc_state;
2103 struct drm_plane *plane;
2104 struct drm_plane_state *new_plane_state;
2105 struct dm_plane_state *dm_new_plane_state;
2106 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2107 enum dc_connection_type new_connection_type = dc_connection_none;
2108 struct dc_state *dc_state;
2111 if (amdgpu_in_reset(adev)) {
2112 dc_state = dm->cached_dc_state;
2114 r = dm_dmub_hw_init(adev);
2116 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2118 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2121 amdgpu_dm_irq_resume_early(adev);
2123 for (i = 0; i < dc_state->stream_count; i++) {
2124 dc_state->streams[i]->mode_changed = true;
2125 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2126 dc_state->stream_status->plane_states[j]->update_flags.raw = 0xffffffff;
2131 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2133 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2135 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2137 dc_release_state(dm->cached_dc_state);
2138 dm->cached_dc_state = NULL;
2140 amdgpu_dm_irq_resume_late(adev);
2142 mutex_unlock(&dm->dc_lock);
2146 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2147 dc_release_state(dm_state->context);
2148 dm_state->context = dc_create_state(dm->dc);
2149 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2150 dc_resource_state_construct(dm->dc, dm_state->context);
2152 /* Before powering on DC we need to re-initialize DMUB. */
2153 r = dm_dmub_hw_init(adev);
2155 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2157 /* power on hardware */
2158 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2160 /* program HPD filter */
2164 * Enable HPD Rx IRQ early; this should be done before setting the mode,
2165 * as short pulse interrupts are used for MST
2167 amdgpu_dm_irq_resume_early(adev);
2169 /* On resume we need to rewrite the MSTM control bits to enable MST */
2170 s3_handle_mst(ddev, false);
2173 drm_connector_list_iter_begin(ddev, &iter);
2174 drm_for_each_connector_iter(connector, &iter) {
2175 aconnector = to_amdgpu_dm_connector(connector);
2178 * This is the case when traversing through already created MST
2179 * connectors; they should be skipped
2181 if (aconnector->mst_port)
2184 mutex_lock(&aconnector->hpd_lock);
2185 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2186 DRM_ERROR("KMS: Failed to detect connector\n");
2188 if (aconnector->base.force && new_connection_type == dc_connection_none)
2189 emulated_link_detect(aconnector->dc_link);
2191 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2193 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2194 aconnector->fake_enable = false;
2196 if (aconnector->dc_sink)
2197 dc_sink_release(aconnector->dc_sink);
2198 aconnector->dc_sink = NULL;
2199 amdgpu_dm_update_connector_after_detect(aconnector);
2200 mutex_unlock(&aconnector->hpd_lock);
2202 drm_connector_list_iter_end(&iter);
2204 /* Force mode set in atomic commit */
2205 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2206 new_crtc_state->active_changed = true;
2209 * atomic_check is expected to create the dc states. We need to release
2210 * them here, since they were duplicated as part of the suspend procedure.
2213 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2214 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2215 if (dm_new_crtc_state->stream) {
2216 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2217 dc_stream_release(dm_new_crtc_state->stream);
2218 dm_new_crtc_state->stream = NULL;
2222 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2223 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2224 if (dm_new_plane_state->dc_state) {
2225 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2226 dc_plane_state_release(dm_new_plane_state->dc_state);
2227 dm_new_plane_state->dc_state = NULL;
2231 drm_atomic_helper_resume(ddev, dm->cached_state);
2233 dm->cached_state = NULL;
2235 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2236 amdgpu_dm_crtc_secure_display_resume(adev);
2239 amdgpu_dm_irq_resume_late(adev);
2241 amdgpu_dm_smu_write_watermarks_table(adev);
2249 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2250 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2251 * the base driver's device list to be initialized and torn down accordingly.
2253 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2256 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2258 .early_init = dm_early_init,
2259 .late_init = dm_late_init,
2260 .sw_init = dm_sw_init,
2261 .sw_fini = dm_sw_fini,
2262 .hw_init = dm_hw_init,
2263 .hw_fini = dm_hw_fini,
2264 .suspend = dm_suspend,
2265 .resume = dm_resume,
2266 .is_idle = dm_is_idle,
2267 .wait_for_idle = dm_wait_for_idle,
2268 .check_soft_reset = dm_check_soft_reset,
2269 .soft_reset = dm_soft_reset,
2270 .set_clockgating_state = dm_set_clockgating_state,
2271 .set_powergating_state = dm_set_powergating_state,
2274 const struct amdgpu_ip_block_version dm_ip_block =
2276 .type = AMD_IP_BLOCK_TYPE_DCE,
2280 .funcs = &amdgpu_dm_funcs,
2290 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2291 .fb_create = amdgpu_display_user_framebuffer_create,
2292 .get_format_info = amd_get_format_info,
2293 .output_poll_changed = drm_fb_helper_output_poll_changed,
2294 .atomic_check = amdgpu_dm_atomic_check,
2295 .atomic_commit = drm_atomic_helper_commit,
2298 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2299 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2302 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2304 u32 max_cll, min_cll, max, min, q, r;
2305 struct amdgpu_dm_backlight_caps *caps;
2306 struct amdgpu_display_manager *dm;
2307 struct drm_connector *conn_base;
2308 struct amdgpu_device *adev;
2309 struct dc_link *link = NULL;
2310 static const u8 pre_computed_values[] = {
2311 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2312 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2314 if (!aconnector || !aconnector->dc_link)
2317 link = aconnector->dc_link;
2318 if (link->connector_signal != SIGNAL_TYPE_EDP)
2321 conn_base = &aconnector->base;
2322 adev = drm_to_adev(conn_base->dev);
2324 caps = &dm->backlight_caps;
2325 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2326 caps->aux_support = false;
2327 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2328 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2330 if (caps->ext_caps->bits.oled == 1 ||
2331 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2332 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2333 caps->aux_support = true;
2335 if (amdgpu_backlight == 0)
2336 caps->aux_support = false;
2337 else if (amdgpu_backlight == 1)
2338 caps->aux_support = true;
2340 /* From the specification (CTA-861-G), the maximum luminance is
2341 * calculated as:
2342 * Luminance = 50*2**(CV/32)
2343 * where CV is a one-byte value.
2344 * Evaluating this expression directly would require floating point
2345 * precision; to avoid that complexity, we take advantage of the fact
2346 * that CV is divided by a constant. By Euclid's division algorithm, CV
2347 * can be written as CV = 32*q + r. Substituting this into the
2348 * Luminance expression gives (2**q)*(50*2**(r/32)), so we only
2349 * need to pre-compute the values of 50*2**(r/32). The values were
2350 * pre-computed with the following Ruby line:
2351 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2352 * The results of the above expression can be verified against
2353 * pre_computed_values.
2357 max = (1 << q) * pre_computed_values[r];
2359 // min luminance: maxLum * (CV/255)^2 / 100
2360 q = DIV_ROUND_CLOSEST(min_cll, 255);
2361 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2363 caps->aux_max_input_signal = max;
2364 caps->aux_min_input_signal = min;
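/*
 * Illustrative sketch, not part of the driver: the integer-only evaluation
 * of Luminance = 50*2**(CV/32) described in the comment above, in
 * isolation.  With CV = 32*q + r, the result is (1 << q) * table[r], where
 * table[r] = round(50*2**(r/32)).  For example, CV = 65 gives q = 2 and
 * r = 1, so the luminance is 4 * 51 = 204 nits.  The function name is
 * hypothetical.
 */
static unsigned int example_cta861g_max_luminance(unsigned int cv)
{
	static const u8 table[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	unsigned int q = cv >> 5;	/* quotient of cv / 32 */
	unsigned int r = cv & 31;	/* remainder of cv / 32 */

	return (1u << q) * table[r];
}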
2367 void amdgpu_dm_update_connector_after_detect(
2368 struct amdgpu_dm_connector *aconnector)
2370 struct drm_connector *connector = &aconnector->base;
2371 struct drm_device *dev = connector->dev;
2372 struct dc_sink *sink;
2374 /* MST handled by drm_mst framework */
2375 if (aconnector->mst_mgr.mst_state == true)
2378 sink = aconnector->dc_link->local_sink;
2380 dc_sink_retain(sink);
2383 * An EDID-managed connector gets its first update only in the mode_valid
2384 * hook, and then the connector sink is set to either the fake or the
2385 * physical sink depending on the link status. Skip if already done during boot.
2387 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2388 && aconnector->dc_em_sink) {
2391 * For headless S3 resume, use the emulated sink to fake the stream,
2392 * because connector->sink is set to NULL on resume
2394 mutex_lock(&dev->mode_config.mutex);
2397 if (aconnector->dc_sink) {
2398 amdgpu_dm_update_freesync_caps(connector, NULL);
2400 * The retain and release below are used to bump up the refcount for
2401 * the sink, because the link doesn't point to it anymore after
2402 * disconnect; otherwise, on the next crtc-to-connector reshuffle by
2403 * UMD we would run into an unwanted dc_sink release
2405 dc_sink_release(aconnector->dc_sink);
2407 aconnector->dc_sink = sink;
2408 dc_sink_retain(aconnector->dc_sink);
2409 amdgpu_dm_update_freesync_caps(connector,
2412 amdgpu_dm_update_freesync_caps(connector, NULL);
2413 if (!aconnector->dc_sink) {
2414 aconnector->dc_sink = aconnector->dc_em_sink;
2415 dc_sink_retain(aconnector->dc_sink);
2419 mutex_unlock(&dev->mode_config.mutex);
2422 dc_sink_release(sink);
2427 * TODO: temporary guard while looking for a proper fix.
2428 * If this sink is an MST sink, we should not do anything
2430 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2431 dc_sink_release(sink);
2435 if (aconnector->dc_sink == sink) {
2437 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2440 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2441 aconnector->connector_id);
2443 dc_sink_release(sink);
2447 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2448 aconnector->connector_id, aconnector->dc_sink, sink);
2450 mutex_lock(&dev->mode_config.mutex);
2453 * 1. Update status of the drm connector
2454 * 2. Send an event and let userspace tell us what to do
2458 * TODO: check if we still need the S3 mode update workaround.
2459 * If yes, put it here.
2461 if (aconnector->dc_sink) {
2462 amdgpu_dm_update_freesync_caps(connector, NULL);
2463 dc_sink_release(aconnector->dc_sink);
2466 aconnector->dc_sink = sink;
2467 dc_sink_retain(aconnector->dc_sink);
2468 if (sink->dc_edid.length == 0) {
2469 aconnector->edid = NULL;
2470 if (aconnector->dc_link->aux_mode) {
2471 drm_dp_cec_unset_edid(
2472 &aconnector->dm_dp_aux.aux);
2476 (struct edid *)sink->dc_edid.raw_edid;
2478 drm_connector_update_edid_property(connector,
2480 if (aconnector->dc_link->aux_mode)
2481 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2485 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2486 update_connector_ext_caps(aconnector);
2488 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2489 amdgpu_dm_update_freesync_caps(connector, NULL);
2490 drm_connector_update_edid_property(connector, NULL);
2491 aconnector->num_modes = 0;
2492 dc_sink_release(aconnector->dc_sink);
2493 aconnector->dc_sink = NULL;
2494 aconnector->edid = NULL;
2495 #ifdef CONFIG_DRM_AMD_DC_HDCP
2496 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2497 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2498 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2502 mutex_unlock(&dev->mode_config.mutex);
2504 update_subconnector_property(aconnector);
2507 dc_sink_release(sink);
2510 static void handle_hpd_irq(void *param)
2512 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2513 struct drm_connector *connector = &aconnector->base;
2514 struct drm_device *dev = connector->dev;
2515 enum dc_connection_type new_connection_type = dc_connection_none;
2516 #ifdef CONFIG_DRM_AMD_DC_HDCP
2517 struct amdgpu_device *adev = drm_to_adev(dev);
2518 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2522 * In case of failure or MST there is no need to update the connector status
2523 * or notify the OS, since (in the MST case) MST handles this in its own context.
2525 mutex_lock(&aconnector->hpd_lock);
2527 #ifdef CONFIG_DRM_AMD_DC_HDCP
2528 if (adev->dm.hdcp_workqueue) {
2529 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2530 dm_con_state->update_hdcp = true;
2533 if (aconnector->fake_enable)
2534 aconnector->fake_enable = false;
2536 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2537 DRM_ERROR("KMS: Failed to detect connector\n");
2539 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2540 emulated_link_detect(aconnector->dc_link);
2543 drm_modeset_lock_all(dev);
2544 dm_restore_drm_connector_state(dev, connector);
2545 drm_modeset_unlock_all(dev);
2547 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2548 drm_kms_helper_hotplug_event(dev);
2550 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2551 if (new_connection_type == dc_connection_none &&
2552 aconnector->dc_link->type == dc_connection_none)
2553 dm_set_dpms_off(aconnector->dc_link);
2555 amdgpu_dm_update_connector_after_detect(aconnector);
2557 drm_modeset_lock_all(dev);
2558 dm_restore_drm_connector_state(dev, connector);
2559 drm_modeset_unlock_all(dev);
2561 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2562 drm_kms_helper_hotplug_event(dev);
2564 mutex_unlock(&aconnector->hpd_lock);
2568 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2570 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2572 bool new_irq_handled = false;
2574 int dpcd_bytes_to_read;
2576 const int max_process_count = 30;
2577 int process_count = 0;
2579 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2581 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2582 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2583 /* DPCD 0x200 - 0x201 for downstream IRQ */
2584 dpcd_addr = DP_SINK_COUNT;
2586 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2587 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2588 dpcd_addr = DP_SINK_COUNT_ESI;
2591 dret = drm_dp_dpcd_read(
2592 &aconnector->dm_dp_aux.aux,
2595 dpcd_bytes_to_read);
2597 while (dret == dpcd_bytes_to_read &&
2598 process_count < max_process_count) {
2604 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2605 /* handle HPD short pulse irq */
2606 if (aconnector->mst_mgr.mst_state)
2608 &aconnector->mst_mgr,
2612 if (new_irq_handled) {
2613 /* ACK at DPCD to notify downstream */
2614 const int ack_dpcd_bytes_to_write =
2615 dpcd_bytes_to_read - 1;
2617 for (retry = 0; retry < 3; retry++) {
2620 wret = drm_dp_dpcd_write(
2621 &aconnector->dm_dp_aux.aux,
2624 ack_dpcd_bytes_to_write);
2625 if (wret == ack_dpcd_bytes_to_write)
2629 /* check if there is new irq to be handled */
2630 dret = drm_dp_dpcd_read(
2631 &aconnector->dm_dp_aux.aux,
2634 dpcd_bytes_to_read);
2636 new_irq_handled = false;
2642 if (process_count == max_process_count)
2643 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2646 static void handle_hpd_rx_irq(void *param)
2648 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2649 struct drm_connector *connector = &aconnector->base;
2650 struct drm_device *dev = connector->dev;
2651 struct dc_link *dc_link = aconnector->dc_link;
2652 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2653 bool result = false;
2654 enum dc_connection_type new_connection_type = dc_connection_none;
2655 struct amdgpu_device *adev = drm_to_adev(dev);
2656 union hpd_irq_data hpd_irq_data;
2658 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2661 * TODO: Temporarily add a mutex so that the HPD interrupt does not have a
2662 * GPIO conflict; after the i2c helper is implemented, this mutex should be retired.
2665 if (dc_link->type != dc_connection_mst_branch)
2666 mutex_lock(&aconnector->hpd_lock);
2668 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2670 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2671 (dc_link->type == dc_connection_mst_branch)) {
2672 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2674 dm_handle_hpd_rx_irq(aconnector);
2676 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2678 dm_handle_hpd_rx_irq(aconnector);
2683 mutex_lock(&adev->dm.dc_lock);
2684 #ifdef CONFIG_DRM_AMD_DC_HDCP
2685 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2687 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2689 mutex_unlock(&adev->dm.dc_lock);
2692 if (result && !is_mst_root_connector) {
2693 /* Downstream Port status changed. */
2694 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2695 DRM_ERROR("KMS: Failed to detect connector\n");
2697 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2698 emulated_link_detect(dc_link);
2700 if (aconnector->fake_enable)
2701 aconnector->fake_enable = false;
2703 amdgpu_dm_update_connector_after_detect(aconnector);
2706 drm_modeset_lock_all(dev);
2707 dm_restore_drm_connector_state(dev, connector);
2708 drm_modeset_unlock_all(dev);
2710 drm_kms_helper_hotplug_event(dev);
2711 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2713 if (aconnector->fake_enable)
2714 aconnector->fake_enable = false;
2716 amdgpu_dm_update_connector_after_detect(aconnector);
2719 drm_modeset_lock_all(dev);
2720 dm_restore_drm_connector_state(dev, connector);
2721 drm_modeset_unlock_all(dev);
2723 drm_kms_helper_hotplug_event(dev);
2726 #ifdef CONFIG_DRM_AMD_DC_HDCP
2727 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2728 if (adev->dm.hdcp_workqueue)
2729 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2733 if (dc_link->type != dc_connection_mst_branch) {
2734 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2735 mutex_unlock(&aconnector->hpd_lock);
2739 static void register_hpd_handlers(struct amdgpu_device *adev)
2741 struct drm_device *dev = adev_to_drm(adev);
2742 struct drm_connector *connector;
2743 struct amdgpu_dm_connector *aconnector;
2744 const struct dc_link *dc_link;
2745 struct dc_interrupt_params int_params = {0};
2747 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2748 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2750 list_for_each_entry(connector,
2751 &dev->mode_config.connector_list, head) {
2753 aconnector = to_amdgpu_dm_connector(connector);
2754 dc_link = aconnector->dc_link;
2756 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2757 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2758 int_params.irq_source = dc_link->irq_source_hpd;
2760 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2762 (void *) aconnector);
2765 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2767 /* Also register for DP short pulse (hpd_rx). */
2768 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2769 int_params.irq_source = dc_link->irq_source_hpd_rx;
2771 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2773 (void *) aconnector);
2778 #if defined(CONFIG_DRM_AMD_DC_SI)
2779 /* Register IRQ sources and initialize IRQ callbacks */
2780 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2782 struct dc *dc = adev->dm.dc;
2783 struct common_irq_params *c_irq_params;
2784 struct dc_interrupt_params int_params = {0};
2787 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2789 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2790 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2793 * Actions of amdgpu_irq_add_id():
2794 * 1. Register a set() function with base driver.
2795 * Base driver will call set() function to enable/disable an
2796 * interrupt in DC hardware.
2797 * 2. Register amdgpu_dm_irq_handler().
2798 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2799 * coming from DC hardware.
2800 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2801 * for acknowledging and handling. */
2803 /* Use VBLANK interrupt */
2804 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2805 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2807 DRM_ERROR("Failed to add crtc irq id!\n");
2811 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2812 int_params.irq_source =
2813 dc_interrupt_to_irq_source(dc, i + 1, 0);
2815 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2817 c_irq_params->adev = adev;
2818 c_irq_params->irq_src = int_params.irq_source;
2820 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2821 dm_crtc_high_irq, c_irq_params);
2824 /* Use GRPH_PFLIP interrupt */
2825 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2826 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2827 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2829 DRM_ERROR("Failed to add page flip irq id!\n");
2833 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2834 int_params.irq_source =
2835 dc_interrupt_to_irq_source(dc, i, 0);
2837 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2839 c_irq_params->adev = adev;
2840 c_irq_params->irq_src = int_params.irq_source;
2842 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2843 dm_pflip_high_irq, c_irq_params);
2848 r = amdgpu_irq_add_id(adev, client_id,
2849 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2851 DRM_ERROR("Failed to add hpd irq id!\n");
2855 register_hpd_handlers(adev);
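/*
 * Illustrative sketch, not part of the driver: the two-step registration
 * described in the "Actions of amdgpu_irq_add_id()" comment above, distilled
 * into a single helper.  Step 1 registers the source id with the base
 * driver; step 2 translates it to a DC irq source and attaches the DM
 * handler with its per-source parameter block.  The function name is
 * hypothetical; the calls are the same ones used in the loops above.
 */
static int example_register_one_irq_source(struct amdgpu_device *adev,
					   struct dc *dc,
					   unsigned int client_id,
					   unsigned int src_id,
					   struct amdgpu_irq_src *irq,
					   struct common_irq_params *params,
					   void (*handler)(void *))
{
	struct dc_interrupt_params int_params = {0};
	int r;

	r = amdgpu_irq_add_id(adev, client_id, src_id, irq);	/* step 1 */
	if (r)
		return r;

	int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
	int_params.irq_source = dc_interrupt_to_irq_source(dc, src_id, 0);

	params->adev = adev;
	params->irq_src = int_params.irq_source;

	amdgpu_dm_irq_register_interrupt(adev, &int_params,	/* step 2 */
					 handler, params);
	return 0;
}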
2861 /* Register IRQ sources and initialize IRQ callbacks */
2862 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2864 struct dc *dc = adev->dm.dc;
2865 struct common_irq_params *c_irq_params;
2866 struct dc_interrupt_params int_params = {0};
2869 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2871 if (adev->asic_type >= CHIP_VEGA10)
2872 client_id = SOC15_IH_CLIENTID_DCE;
2874 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2875 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2878 * Actions of amdgpu_irq_add_id():
2879 * 1. Register a set() function with base driver.
2880 * Base driver will call set() function to enable/disable an
2881 * interrupt in DC hardware.
2882 * 2. Register amdgpu_dm_irq_handler().
2883 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2884 * coming from DC hardware.
2885 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2886 * for acknowledging and handling. */
2888 /* Use VBLANK interrupt */
2889 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2890 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2892 DRM_ERROR("Failed to add crtc irq id!\n");
2896 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2897 int_params.irq_source =
2898 dc_interrupt_to_irq_source(dc, i, 0);
2900 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2902 c_irq_params->adev = adev;
2903 c_irq_params->irq_src = int_params.irq_source;
2905 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2906 dm_crtc_high_irq, c_irq_params);
2909 /* Use VUPDATE interrupt */
2910 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2911 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2913 DRM_ERROR("Failed to add vupdate irq id!\n");
2917 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2918 int_params.irq_source =
2919 dc_interrupt_to_irq_source(dc, i, 0);
2921 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2923 c_irq_params->adev = adev;
2924 c_irq_params->irq_src = int_params.irq_source;
2926 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2927 dm_vupdate_high_irq, c_irq_params);
2930 /* Use GRPH_PFLIP interrupt */
2931 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2932 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2933 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2935 DRM_ERROR("Failed to add page flip irq id!\n");
2939 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2940 int_params.irq_source =
2941 dc_interrupt_to_irq_source(dc, i, 0);
2943 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2945 c_irq_params->adev = adev;
2946 c_irq_params->irq_src = int_params.irq_source;
2948 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2949 dm_pflip_high_irq, c_irq_params);
2954 r = amdgpu_irq_add_id(adev, client_id,
2955 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2957 DRM_ERROR("Failed to add hpd irq id!\n");
2961 register_hpd_handlers(adev);
2966 #if defined(CONFIG_DRM_AMD_DC_DCN)
2967 /* Register IRQ sources and initialize IRQ callbacks */
2968 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2970 struct dc *dc = adev->dm.dc;
2971 struct common_irq_params *c_irq_params;
2972 struct dc_interrupt_params int_params = {0};
2976 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2977 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2980 * Actions of amdgpu_irq_add_id():
2981 * 1. Register a set() function with base driver.
2982 * Base driver will call set() function to enable/disable an
2983 * interrupt in DC hardware.
2984 * 2. Register amdgpu_dm_irq_handler().
2985 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2986 * coming from DC hardware.
2987 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2988 * for acknowledging and handling.
2991 /* Use VSTARTUP interrupt */
2992 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2993 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2995 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2998 DRM_ERROR("Failed to add crtc irq id!\n");
3002 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3003 int_params.irq_source =
3004 dc_interrupt_to_irq_source(dc, i, 0);
3006 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3008 c_irq_params->adev = adev;
3009 c_irq_params->irq_src = int_params.irq_source;
3011 amdgpu_dm_irq_register_interrupt(
3012 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3015 /* Use OTG vertical line interrupt */
3016 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3017 for (i = DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL;
3018 i <= DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL
3019 + adev->mode_info.num_crtc - 1;
3021 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vline0_irq);
3024 DRM_ERROR("Failed to add vline0 irq id!\n");
3028 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3029 int_params.irq_source =
3030 dc_interrupt_to_irq_source(dc, i, 0);
3032 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3033 - DC_IRQ_SOURCE_DC1_VLINE0];
3035 c_irq_params->adev = adev;
3036 c_irq_params->irq_src = int_params.irq_source;
3038 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3043 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3044 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3045 * to trigger at end of each vblank, regardless of state of the lock,
3046 * matching DCE behaviour.
3048 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3049 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3051 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3054 DRM_ERROR("Failed to add vupdate irq id!\n");
3058 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3059 int_params.irq_source =
3060 dc_interrupt_to_irq_source(dc, i, 0);
3062 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3064 c_irq_params->adev = adev;
3065 c_irq_params->irq_src = int_params.irq_source;
3067 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3068 dm_vupdate_high_irq, c_irq_params);
3071 /* Use GRPH_PFLIP interrupt */
3072 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3073 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3075 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3077 DRM_ERROR("Failed to add page flip irq id!\n");
3081 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3082 int_params.irq_source =
3083 dc_interrupt_to_irq_source(dc, i, 0);
3085 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3087 c_irq_params->adev = adev;
3088 c_irq_params->irq_src = int_params.irq_source;
3090 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3091 dm_pflip_high_irq, c_irq_params);
3096 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3099 DRM_ERROR("Failed to add hpd irq id!\n");
3103 register_hpd_handlers(adev);
3110 * Acquires the lock for the atomic state object and returns
3111 * the new atomic state.
3113 * This should only be called during atomic check.
3115 static int dm_atomic_get_state(struct drm_atomic_state *state,
3116 struct dm_atomic_state **dm_state)
3118 struct drm_device *dev = state->dev;
3119 struct amdgpu_device *adev = drm_to_adev(dev);
3120 struct amdgpu_display_manager *dm = &adev->dm;
3121 struct drm_private_state *priv_state;
3126 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3127 if (IS_ERR(priv_state))
3128 return PTR_ERR(priv_state);
3130 *dm_state = to_dm_atomic_state(priv_state);
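/*
 * Illustrative sketch, not part of the driver: a hypothetical atomic-check
 * caller of dm_atomic_get_state().  Getting the private object state
 * acquires its lock, so the returned dm_state->context can be modified
 * safely for the duration of the check.
 */
static int example_use_dm_state(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state;
	int ret;

	ret = dm_atomic_get_state(state, &dm_state);
	if (ret)
		return ret;

	/* dm_state->context may now be mutated for this commit */
	return 0;
}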
3135 static struct dm_atomic_state *
3136 dm_atomic_get_new_state(struct drm_atomic_state *state)
3138 struct drm_device *dev = state->dev;
3139 struct amdgpu_device *adev = drm_to_adev(dev);
3140 struct amdgpu_display_manager *dm = &adev->dm;
3141 struct drm_private_obj *obj;
3142 struct drm_private_state *new_obj_state;
3145 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3146 if (obj->funcs == dm->atomic_obj.funcs)
3147 return to_dm_atomic_state(new_obj_state);
3153 static struct drm_private_state *
3154 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3156 struct dm_atomic_state *old_state, *new_state;
3158 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3162 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3164 old_state = to_dm_atomic_state(obj->state);
3166 if (old_state && old_state->context)
3167 new_state->context = dc_copy_state(old_state->context);
3169 if (!new_state->context) {
3174 return &new_state->base;
3177 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3178 struct drm_private_state *state)
3180 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3182 if (dm_state && dm_state->context)
3183 dc_release_state(dm_state->context);
3188 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3189 .atomic_duplicate_state = dm_atomic_duplicate_state,
3190 .atomic_destroy_state = dm_atomic_destroy_state,
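/*
 * Illustrative sketch, not part of the driver: the drm_private_obj state
 * pattern implemented above, applied to a toy payload.  The duplicate hook
 * must deep-copy anything the new state may mutate, and the destroy hook
 * must release it.  struct toy_state and its functions are hypothetical
 * names for illustration; the sketch assumes the payload is non-NULL.
 */
struct toy_state {
	struct drm_private_state base;
	int *payload;	/* driver-private data carried by the state */
};

static struct drm_private_state *
toy_duplicate_state(struct drm_private_obj *obj)
{
	struct toy_state *old = container_of(obj->state, struct toy_state, base);
	struct toy_state *dup = kzalloc(sizeof(*dup), GFP_KERNEL);

	if (!dup)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &dup->base);

	/* deep-copy the payload so the new state owns its own copy */
	dup->payload = kmemdup(old->payload, sizeof(*old->payload), GFP_KERNEL);
	if (!dup->payload) {
		kfree(dup);
		return NULL;
	}
	return &dup->base;
}

static void toy_destroy_state(struct drm_private_obj *obj,
			      struct drm_private_state *state)
{
	struct toy_state *toy = container_of(state, struct toy_state, base);

	kfree(toy->payload);
	kfree(toy);
}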
3193 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3195 struct dm_atomic_state *state;
3198 adev->mode_info.mode_config_initialized = true;
3200 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3201 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3203 adev_to_drm(adev)->mode_config.max_width = 16384;
3204 adev_to_drm(adev)->mode_config.max_height = 16384;
3206 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3207 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3208 /* indicates support for immediate flip */
3209 adev_to_drm(adev)->mode_config.async_page_flip = true;
3211 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3213 state = kzalloc(sizeof(*state), GFP_KERNEL);
3217 state->context = dc_create_state(adev->dm.dc);
3218 if (!state->context) {
3223 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3225 drm_atomic_private_obj_init(adev_to_drm(adev),
3226 &adev->dm.atomic_obj,
3228 &dm_atomic_state_funcs);
3230 r = amdgpu_display_modeset_create_props(adev);
3232 dc_release_state(state->context);
3237 r = amdgpu_dm_audio_init(adev);
3239 dc_release_state(state->context);
3247 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3248 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3249 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3251 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3252 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3254 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3256 #if defined(CONFIG_ACPI)
3257 struct amdgpu_dm_backlight_caps caps;
3259 memset(&caps, 0, sizeof(caps));
3261 if (dm->backlight_caps.caps_valid)
3264 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3265 if (caps.caps_valid) {
3266 dm->backlight_caps.caps_valid = true;
3267 if (caps.aux_support)
3269 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3270 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3272 dm->backlight_caps.min_input_signal =
3273 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3274 dm->backlight_caps.max_input_signal =
3275 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3278 if (dm->backlight_caps.aux_support)
3281 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3282 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3286 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3287 unsigned *min, unsigned *max)
3292 if (caps->aux_support) {
3293 // Firmware limits are in nits, DC API wants millinits.
3294 *max = 1000 * caps->aux_max_input_signal;
3295 *min = 1000 * caps->aux_min_input_signal;
3297 // Firmware limits are 8-bit, PWM control is 16-bit.
3298 *max = 0x101 * caps->max_input_signal;
3299 *min = 0x101 * caps->min_input_signal;
3304 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3305 uint32_t brightness)
3309 if (!get_brightness_range(caps, &min, &max))
3312 // Rescale 0..255 to min..max
3313 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3314 AMDGPU_MAX_BL_LEVEL);
3317 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3318 uint32_t brightness)
3322 if (!get_brightness_range(caps, &min, &max))
3325 if (brightness < min)
3327 // Rescale min..max to 0..255
3328 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), max - min);
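/*
 * Illustrative sketch, not part of the driver: the round trip performed by
 * the two conversion helpers above, using the PWM-path defaults
 * min_input_signal = 12 and max_input_signal = 255.  The 16-bit range is
 * 12 * 0x101 = 3084 to 255 * 0x101 = 65535; a user level of 128 maps to
 * 3084 + round(62451 * 128 / 255) = 34433, and converting back yields 128
 * again.  The function name is hypothetical.
 */
static void example_brightness_round_trip(void)
{
	const unsigned int min = 12 * 0x101, max = 255 * 0x101;
	unsigned int hw, user;

	hw = min + DIV_ROUND_CLOSEST((max - min) * 128, AMDGPU_MAX_BL_LEVEL);
	user = DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (hw - min), max - min);
	WARN_ON(user != 128);	/* the rescaling is invertible up to rounding */
}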
3332 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3334 struct amdgpu_display_manager *dm = bl_get_data(bd);
3335 struct amdgpu_dm_backlight_caps caps;
3336 struct dc_link *link = NULL;
3340 amdgpu_dm_update_backlight_caps(dm);
3341 caps = dm->backlight_caps;
3343 link = (struct dc_link *)dm->backlight_link;
3345 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3346 // Change brightness based on AUX property
3347 if (caps.aux_support)
3348 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3349 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3351 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3356 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3358 struct amdgpu_display_manager *dm = bl_get_data(bd);
3359 struct amdgpu_dm_backlight_caps caps;
3361 amdgpu_dm_update_backlight_caps(dm);
3362 caps = dm->backlight_caps;
3364 if (caps.aux_support) {
3365 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3369 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3371 return bd->props.brightness;
3372 return convert_brightness_to_user(&caps, avg);
3374 int ret = dc_link_get_backlight_level(dm->backlight_link);
3376 if (ret == DC_ERROR_UNEXPECTED)
3377 return bd->props.brightness;
3378 return convert_brightness_to_user(&caps, ret);
3382 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3383 .options = BL_CORE_SUSPENDRESUME,
3384 .get_brightness = amdgpu_dm_backlight_get_brightness,
3385 .update_status = amdgpu_dm_backlight_update_status,
3389 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3392 struct backlight_properties props = { 0 };
3394 amdgpu_dm_update_backlight_caps(dm);
3396 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3397 props.brightness = AMDGPU_MAX_BL_LEVEL;
3398 props.type = BACKLIGHT_RAW;
3400 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3401 adev_to_drm(dm->adev)->primary->index);
3403 dm->backlight_dev = backlight_device_register(bl_name,
3404 adev_to_drm(dm->adev)->dev,
3406 &amdgpu_dm_backlight_ops,
3409 if (IS_ERR(dm->backlight_dev))
3410 DRM_ERROR("DM: Backlight registration failed!\n");
3412 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3417 static int initialize_plane(struct amdgpu_display_manager *dm,
3418 struct amdgpu_mode_info *mode_info, int plane_id,
3419 enum drm_plane_type plane_type,
3420 const struct dc_plane_cap *plane_cap)
3422 struct drm_plane *plane;
3423 unsigned long possible_crtcs;
3426 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3428 DRM_ERROR("KMS: Failed to allocate plane\n");
3431 plane->type = plane_type;
3434 * HACK: IGT tests expect that the primary plane for a CRTC
3435 * can only have one possible CRTC. Only expose support for
3436 * any CRTC for planes that are not going to be used as a primary
3437 * plane - like overlay or underlay planes.
3439 possible_crtcs = 1 << plane_id;
3440 if (plane_id >= dm->dc->caps.max_streams)
3441 possible_crtcs = 0xff;
3443 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3446 DRM_ERROR("KMS: Failed to initialize plane\n");
3452 mode_info->planes[plane_id] = plane;
3458 static void register_backlight_device(struct amdgpu_display_manager *dm,
3459 struct dc_link *link)
3461 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3462 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3464 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3465 link->type != dc_connection_none) {
3467 * Even if registration fails, we should continue with
3468 * DM initialization, because not having backlight control
3469 * is better than a black screen.
3471 amdgpu_dm_register_backlight_device(dm);
3473 if (dm->backlight_dev)
3474 dm->backlight_link = link;
3481 * In this architecture, the association
3482 * connector -> encoder -> crtc
3483 * is not really required. The crtc and connector will hold the
3484 * display_index as an abstraction to use with the DAL component.
3486 * Returns 0 on success
3488 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3490 struct amdgpu_display_manager *dm = &adev->dm;
3492 struct amdgpu_dm_connector *aconnector = NULL;
3493 struct amdgpu_encoder *aencoder = NULL;
3494 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3496 int32_t primary_planes;
3497 enum dc_connection_type new_connection_type = dc_connection_none;
3498 const struct dc_plane_cap *plane;
3500 dm->display_indexes_num = dm->dc->caps.max_streams;
3501 /* Update the actual number of crtcs used */
3502 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3504 link_cnt = dm->dc->caps.max_links;
3505 if (amdgpu_dm_mode_config_init(dm->adev)) {
3506 DRM_ERROR("DM: Failed to initialize mode config\n");
3510 /* There is one primary plane per CRTC */
3511 primary_planes = dm->dc->caps.max_streams;
3512 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3515 * Initialize primary planes, implicit planes for legacy IOCTLS.
3516 * Order is reversed to match iteration order in atomic check.
3518 for (i = (primary_planes - 1); i >= 0; i--) {
3519 plane = &dm->dc->caps.planes[i];
3521 if (initialize_plane(dm, mode_info, i,
3522 DRM_PLANE_TYPE_PRIMARY, plane)) {
3523 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3529 * Initialize overlay planes, index starting after primary planes.
3530 * These planes have a higher DRM index than the primary planes since
3531 * they should be considered as having a higher z-order.
3532 * Order is reversed to match iteration order in atomic check.
3534 * Only support DCN for now, and only expose one so we don't encourage
3535 * userspace to use up all the pipes.
3537 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3538 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3540 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3543 if (!plane->blends_with_above || !plane->blends_with_below)
3546 if (!plane->pixel_format_support.argb8888)
3549 if (initialize_plane(dm, NULL, primary_planes + i,
3550 DRM_PLANE_TYPE_OVERLAY, plane)) {
3551 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3555 /* Only create one overlay plane. */
3559 for (i = 0; i < dm->dc->caps.max_streams; i++)
3560 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3561 DRM_ERROR("KMS: Failed to initialize crtc\n");
3565 /* loop over all connectors on the board */
3566 for (i = 0; i < link_cnt; i++) {
3567 struct dc_link *link = NULL;
3569 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3571 "KMS: Cannot support more than %d display indexes\n",
3572 AMDGPU_DM_MAX_DISPLAY_INDEX);
3576 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3580 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3584 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3585 DRM_ERROR("KMS: Failed to initialize encoder\n");
3589 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3590 DRM_ERROR("KMS: Failed to initialize connector\n");
3594 link = dc_get_link_at_index(dm->dc, i);
3596 if (!dc_link_detect_sink(link, &new_connection_type))
3597 DRM_ERROR("KMS: Failed to detect connector\n");
3599 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3600 emulated_link_detect(link);
3601 amdgpu_dm_update_connector_after_detect(aconnector);
3603 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3604 amdgpu_dm_update_connector_after_detect(aconnector);
3605 register_backlight_device(dm, link);
3606 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3607 amdgpu_dm_set_psr_caps(link);
3613 /* Software is initialized. Now we can register interrupt handlers. */
3614 switch (adev->asic_type) {
3615 #if defined(CONFIG_DRM_AMD_DC_SI)
3620 if (dce60_register_irq_handlers(dm->adev)) {
3621 DRM_ERROR("DM: Failed to initialize IRQ\n");
3635 case CHIP_POLARIS11:
3636 case CHIP_POLARIS10:
3637 case CHIP_POLARIS12:
3642 if (dce110_register_irq_handlers(dm->adev)) {
3643 DRM_ERROR("DM: Failed to initialize IRQ\n");
3647 #if defined(CONFIG_DRM_AMD_DC_DCN)
3653 case CHIP_SIENNA_CICHLID:
3654 case CHIP_NAVY_FLOUNDER:
3655 case CHIP_DIMGREY_CAVEFISH:
3657 if (dcn10_register_irq_handlers(dm->adev)) {
3658 DRM_ERROR("DM: Failed to initialize IRQ\n");
3664 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3676 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3678 drm_mode_config_cleanup(dm->ddev);
3679 drm_atomic_private_obj_fini(&dm->atomic_obj);
3683 /******************************************************************************
3684 * amdgpu_display_funcs functions
3685 *****************************************************************************/
3688 * dm_bandwidth_update - program display watermarks
3690 * @adev: amdgpu_device pointer
3692 * Calculate and program the display watermarks and line buffer allocation.
3694 static void dm_bandwidth_update(struct amdgpu_device *adev)
3696 /* TODO: implement later */
3699 static const struct amdgpu_display_funcs dm_display_funcs = {
3700 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3701 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3702 .backlight_set_level = NULL, /* never called for DC */
3703 .backlight_get_level = NULL, /* never called for DC */
3704 .hpd_sense = NULL,/* called unconditionally */
3705 .hpd_set_polarity = NULL, /* called unconditionally */
3706 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3707 .page_flip_get_scanoutpos =
3708 dm_crtc_get_scanoutpos,/* called unconditionally */
3709 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3710 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3713 #if defined(CONFIG_DEBUG_KERNEL_DC)
3715 static ssize_t s3_debug_store(struct device *device,
3716 struct device_attribute *attr,
3722 struct drm_device *drm_dev = dev_get_drvdata(device);
3723 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3725 ret = kstrtoint(buf, 0, &s3_state);
3730 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3735 return ret == 0 ? count : 0;
3738 DEVICE_ATTR_WO(s3_debug);
3742 static int dm_early_init(void *handle)
3744 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3746 switch (adev->asic_type) {
3747 #if defined(CONFIG_DRM_AMD_DC_SI)
3751 adev->mode_info.num_crtc = 6;
3752 adev->mode_info.num_hpd = 6;
3753 adev->mode_info.num_dig = 6;
3756 adev->mode_info.num_crtc = 2;
3757 adev->mode_info.num_hpd = 2;
3758 adev->mode_info.num_dig = 2;
3763 adev->mode_info.num_crtc = 6;
3764 adev->mode_info.num_hpd = 6;
3765 adev->mode_info.num_dig = 6;
3768 adev->mode_info.num_crtc = 4;
3769 adev->mode_info.num_hpd = 6;
3770 adev->mode_info.num_dig = 7;
3774 adev->mode_info.num_crtc = 2;
3775 adev->mode_info.num_hpd = 6;
3776 adev->mode_info.num_dig = 6;
3780 adev->mode_info.num_crtc = 6;
3781 adev->mode_info.num_hpd = 6;
3782 adev->mode_info.num_dig = 7;
3785 adev->mode_info.num_crtc = 3;
3786 adev->mode_info.num_hpd = 6;
3787 adev->mode_info.num_dig = 9;
3790 adev->mode_info.num_crtc = 2;
3791 adev->mode_info.num_hpd = 6;
3792 adev->mode_info.num_dig = 9;
3794 case CHIP_POLARIS11:
3795 case CHIP_POLARIS12:
3796 adev->mode_info.num_crtc = 5;
3797 adev->mode_info.num_hpd = 5;
3798 adev->mode_info.num_dig = 5;
3800 case CHIP_POLARIS10:
3802 adev->mode_info.num_crtc = 6;
3803 adev->mode_info.num_hpd = 6;
3804 adev->mode_info.num_dig = 6;
3809 adev->mode_info.num_crtc = 6;
3810 adev->mode_info.num_hpd = 6;
3811 adev->mode_info.num_dig = 6;
3813 #if defined(CONFIG_DRM_AMD_DC_DCN)
3817 adev->mode_info.num_crtc = 4;
3818 adev->mode_info.num_hpd = 4;
3819 adev->mode_info.num_dig = 4;
3823 case CHIP_SIENNA_CICHLID:
3824 case CHIP_NAVY_FLOUNDER:
3825 adev->mode_info.num_crtc = 6;
3826 adev->mode_info.num_hpd = 6;
3827 adev->mode_info.num_dig = 6;
3830 case CHIP_DIMGREY_CAVEFISH:
3831 adev->mode_info.num_crtc = 5;
3832 adev->mode_info.num_hpd = 5;
3833 adev->mode_info.num_dig = 5;
3837 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3841 amdgpu_dm_set_irq_funcs(adev);
3843 if (adev->mode_info.funcs == NULL)
3844 adev->mode_info.funcs = &dm_display_funcs;
3847 * Note: Do NOT change adev->audio_endpt_rreg and
3848 * adev->audio_endpt_wreg because they are initialised in
3849 * amdgpu_device_init()
3851 #if defined(CONFIG_DEBUG_KERNEL_DC)
3853 adev_to_drm(adev)->dev,
3854 &dev_attr_s3_debug);
3860 static bool modeset_required(struct drm_crtc_state *crtc_state,
3861 struct dc_stream_state *new_stream,
3862 struct dc_stream_state *old_stream)
3864 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3867 static bool modereset_required(struct drm_crtc_state *crtc_state)
3869 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3872 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3874 drm_encoder_cleanup(encoder);
3878 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3879 .destroy = amdgpu_dm_encoder_destroy,
3883 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3884 struct drm_framebuffer *fb,
3885 int *min_downscale, int *max_upscale)
3887 struct amdgpu_device *adev = drm_to_adev(dev);
3888 struct dc *dc = adev->dm.dc;
3889 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3890 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3892 switch (fb->format->format) {
3893 case DRM_FORMAT_P010:
3894 case DRM_FORMAT_NV12:
3895 case DRM_FORMAT_NV21:
3896 *max_upscale = plane_cap->max_upscale_factor.nv12;
3897 *min_downscale = plane_cap->max_downscale_factor.nv12;
3900 case DRM_FORMAT_XRGB16161616F:
3901 case DRM_FORMAT_ARGB16161616F:
3902 case DRM_FORMAT_XBGR16161616F:
3903 case DRM_FORMAT_ABGR16161616F:
3904 *max_upscale = plane_cap->max_upscale_factor.fp16;
3905 *min_downscale = plane_cap->max_downscale_factor.fp16;
3909 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3910 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3915 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3916 * scaling factor of 1.0 == 1000 units.
3918 if (*max_upscale == 1)
3919 *max_upscale = 1000;
3921 if (*min_downscale == 1)
3922 *min_downscale = 1000;
3926 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3927 struct dc_scaling_info *scaling_info)
3929 int scale_w, scale_h, min_downscale, max_upscale;
3931 memset(scaling_info, 0, sizeof(*scaling_info));
3933 /* Source is a fixed-point 16.16 value, but we ignore the mantissa for now... */
3934 scaling_info->src_rect.x = state->src_x >> 16;
3935 scaling_info->src_rect.y = state->src_y >> 16;
3937 scaling_info->src_rect.width = state->src_w >> 16;
3938 if (scaling_info->src_rect.width == 0)
3941 scaling_info->src_rect.height = state->src_h >> 16;
3942 if (scaling_info->src_rect.height == 0)
3945 scaling_info->dst_rect.x = state->crtc_x;
3946 scaling_info->dst_rect.y = state->crtc_y;
3948 if (state->crtc_w == 0)
3951 scaling_info->dst_rect.width = state->crtc_w;
3953 if (state->crtc_h == 0)
3956 scaling_info->dst_rect.height = state->crtc_h;
3958 /* DRM doesn't specify clipping on destination output. */
3959 scaling_info->clip_rect = scaling_info->dst_rect;
3961 /* Validate scaling per-format with DC plane caps */
3962 if (state->plane && state->plane->dev && state->fb) {
3963 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3964 &min_downscale, &max_upscale);
3966 min_downscale = 250;
3967 max_upscale = 16000;
3970 scale_w = scaling_info->dst_rect.width * 1000 /
3971 scaling_info->src_rect.width;
3973 if (scale_w < min_downscale || scale_w > max_upscale)
3976 scale_h = scaling_info->dst_rect.height * 1000 /
3977 scaling_info->src_rect.height;
3979 if (scale_h < min_downscale || scale_h > max_upscale)
3983 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3984 * assume reasonable defaults based on the format.
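/*
 * Illustrative sketch, not part of the driver: the per-axis check above in
 * isolation.  Scale factors are expressed in 1/1000 units, so the fallback
 * limits of 250 and 16000 mean at most 4:1 downscale and 16:1 upscale.  A
 * 1920-wide source shown 960 pixels wide gives 960 * 1000 / 1920 = 500,
 * which passes the 250 floor.  The function name is hypothetical.
 */
static bool example_scale_ok(int src, int dst,
			     int min_downscale, int max_upscale)
{
	int scale = dst * 1000 / src;	/* 1000 == 1.0 */

	return scale >= min_downscale && scale <= max_upscale;
}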
3991 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3992 uint64_t tiling_flags)
3994 /* Fill GFX8 params */
3995 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3996 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3998 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3999 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4000 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4001 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4002 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4004 /* XXX fix me for VI */
4005 tiling_info->gfx8.num_banks = num_banks;
4006 tiling_info->gfx8.array_mode =
4007 DC_ARRAY_2D_TILED_THIN1;
4008 tiling_info->gfx8.tile_split = tile_split;
4009 tiling_info->gfx8.bank_width = bankw;
4010 tiling_info->gfx8.bank_height = bankh;
4011 tiling_info->gfx8.tile_aspect = mtaspect;
4012 tiling_info->gfx8.tile_mode =
4013 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4014 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4015 == DC_ARRAY_1D_TILED_THIN1) {
4016 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4019 tiling_info->gfx8.pipe_config =
4020 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4024 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4025 union dc_tiling_info *tiling_info)
4027 tiling_info->gfx9.num_pipes =
4028 adev->gfx.config.gb_addr_config_fields.num_pipes;
4029 tiling_info->gfx9.num_banks =
4030 adev->gfx.config.gb_addr_config_fields.num_banks;
4031 tiling_info->gfx9.pipe_interleave =
4032 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4033 tiling_info->gfx9.num_shader_engines =
4034 adev->gfx.config.gb_addr_config_fields.num_se;
4035 tiling_info->gfx9.max_compressed_frags =
4036 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4037 tiling_info->gfx9.num_rb_per_se =
4038 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4039 tiling_info->gfx9.shaderEnable = 1;
4040 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4041 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4042 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4043 adev->asic_type == CHIP_VANGOGH)
4044 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4048 validate_dcc(struct amdgpu_device *adev,
4049 const enum surface_pixel_format format,
4050 const enum dc_rotation_angle rotation,
4051 const union dc_tiling_info *tiling_info,
4052 const struct dc_plane_dcc_param *dcc,
4053 const struct dc_plane_address *address,
4054 const struct plane_size *plane_size)
4056 struct dc *dc = adev->dm.dc;
4057 struct dc_dcc_surface_param input;
4058 struct dc_surface_dcc_cap output;
4060 memset(&input, 0, sizeof(input));
4061 memset(&output, 0, sizeof(output));
4066 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4067 !dc->cap_funcs.get_dcc_compression_cap)
4070 input.format = format;
4071 input.surface_size.width = plane_size->surface_size.width;
4072 input.surface_size.height = plane_size->surface_size.height;
4073 input.swizzle_mode = tiling_info->gfx9.swizzle;
4075 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4076 input.scan = SCAN_DIRECTION_HORIZONTAL;
4077 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4078 input.scan = SCAN_DIRECTION_VERTICAL;
4080 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4083 if (!output.capable)
4086 if (dcc->independent_64b_blks == 0 &&
4087 output.grph.rgb.independent_64b_blks != 0)
4094 modifier_has_dcc(uint64_t modifier)
4096 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4100 modifier_gfx9_swizzle_mode(uint64_t modifier)
4102 if (modifier == DRM_FORMAT_MOD_LINEAR)
4105 return AMD_FMT_MOD_GET(TILE, modifier);
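/*
 * Illustrative sketch, not part of the driver: AMD format modifiers are a
 * packed bitfield, and the helpers above read individual fields with
 * AMD_FMT_MOD_GET().  Decoding a modifier for debugging could look like
 * this; the function name is hypothetical.
 */
static void example_decode_modifier(uint64_t modifier)
{
	if (!IS_AMD_FMT_MOD(modifier))
		return;	/* linear or another vendor's modifier */

	pr_info("swizzle=%llu version=%llu dcc=%llu\n",
		(unsigned long long)AMD_FMT_MOD_GET(TILE, modifier),
		(unsigned long long)AMD_FMT_MOD_GET(TILE_VERSION, modifier),
		(unsigned long long)AMD_FMT_MOD_GET(DCC, modifier));
}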
4108 static const struct drm_format_info *
4109 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4111 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4115 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4116 union dc_tiling_info *tiling_info,
4119 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4120 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4121 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4122 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4124 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4126 if (!IS_AMD_FMT_MOD(modifier))
4129 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4130 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4132 if (adev->family >= AMDGPU_FAMILY_NV) {
4133 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4135 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4137 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4141 enum dm_micro_swizzle {
4142 MICRO_SWIZZLE_Z = 0,
4143 MICRO_SWIZZLE_S = 1,
4144 MICRO_SWIZZLE_D = 2,
4148 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4152 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4153 const struct drm_format_info *info = drm_format_info(format);
4155 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4161 * We always have to allow this modifier, because core DRM still
4162 * checks LINEAR support if userspace does not provide modifiers.
4164 if (modifier == DRM_FORMAT_MOD_LINEAR)
4168 * The arbitrary tiling support for multiplane formats has not been hooked up.
4171 if (info->num_planes > 1)
4175 * For D swizzle the canonical modifier depends on the bpp, so check
4178 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4179 adev->family >= AMDGPU_FAMILY_NV) {
4180 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4184 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4188 if (modifier_has_dcc(modifier)) {
4189 /* Per radeonsi comments 16/64 bpp are more complicated. */
4190 if (info->cpp[0] != 4)
4198 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4203 if (*cap - *size < 1) {
4204 uint64_t new_cap = *cap * 2;
4205 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4213 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4219 (*mods)[*size] = mod;
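/*
 * Illustrative sketch, not part of the driver: the amortized-doubling
 * append that add_modifier() implements above.  Growing the capacity
 * geometrically keeps the total copy cost linear in the number of appends.
 * The function name is hypothetical.
 */
static bool example_append_u64(uint64_t **arr, uint64_t *size,
			       uint64_t *cap, uint64_t value)
{
	if (*size == *cap) {
		uint64_t new_cap = *cap ? *cap * 2 : 4;
		uint64_t *grown = kmalloc_array(new_cap, sizeof(**arr),
						GFP_KERNEL);

		if (!grown)
			return false;
		/* copy the old contents, then retire the old buffer */
		memcpy(grown, *arr, *size * sizeof(**arr));
		kfree(*arr);
		*arr = grown;
		*cap = new_cap;
	}
	(*arr)[(*size)++] = value;
	return true;
}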
4224 add_gfx9_modifiers(const struct amdgpu_device *adev,
4225 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4227 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4228 int pipe_xor_bits = min(8, pipes +
4229 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4230 int bank_xor_bits = min(8 - pipe_xor_bits,
4231 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4232 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4233 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4236 if (adev->family == AMDGPU_FAMILY_RV) {
4237 /* Raven2 and later */
4238 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4241 * No _D DCC swizzles yet because we only allow 32bpp, which
4242 * doesn't support _D on DCN
4245 if (has_constant_encode) {
4246 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4247 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4248 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4249 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4250 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4251 AMD_FMT_MOD_SET(DCC, 1) |
4252 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4253 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4254 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4257 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4258 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4259 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4260 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4261 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4262 AMD_FMT_MOD_SET(DCC, 1) |
4263 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4264 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4265 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4267 if (has_constant_encode) {
4268 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4269 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4270 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4271 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4272 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4273 AMD_FMT_MOD_SET(DCC, 1) |
4274 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4275 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4276 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4278 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4279 AMD_FMT_MOD_SET(RB, rb) |
4280 AMD_FMT_MOD_SET(PIPE, pipes));
4283 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4284 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4285 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4286 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4287 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4288 AMD_FMT_MOD_SET(DCC, 1) |
4289 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4290 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4291 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4292 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4293 AMD_FMT_MOD_SET(RB, rb) |
4294 AMD_FMT_MOD_SET(PIPE, pipes));
4298 * Only supported for 64bpp on Raven; will be filtered by format in
4299 * dm_plane_format_mod_supported.
4301 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4302 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4303 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4304 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4305 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4307 if (adev->family == AMDGPU_FAMILY_RV) {
4308 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4309 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4310 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4311 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4312 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4316 * Only supported for 64bpp on Raven; will be filtered by format in
4317 * dm_plane_format_mod_supported.
4319 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4320 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4321 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4323 if (adev->family == AMDGPU_FAMILY_RV) {
4324 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4325 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4326 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
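/*
 * A minimal sketch of how the modifiers above are composed, assuming the
 * AMD_FMT_MOD_SET()/AMD_FMT_MOD_GET() helpers from drm_fourcc.h: each
 * field is shifted into its slot of the u64, so packing is a bitwise OR
 * and decoding is the mirror-image extraction.
 */
static void __maybe_unused example_modifier_roundtrip(void)
{
	uint64_t mod = AMD_FMT_MOD |
		AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		AMD_FMT_MOD_SET(PIPE_XOR_BITS, 2);

	/* Extraction recovers exactly what was packed. */
	WARN_ON(AMD_FMT_MOD_GET(TILE, mod) != AMD_FMT_MOD_TILE_GFX9_64K_S_X);
	WARN_ON(AMD_FMT_MOD_GET(PIPE_XOR_BITS, mod) != 2);
}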
4331 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4332 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4334 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4336 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4337 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4338 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4339 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4340 AMD_FMT_MOD_SET(DCC, 1) |
4341 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4342 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4343 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4345 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4346 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4347 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4348 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4349 AMD_FMT_MOD_SET(DCC, 1) |
4350 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4351 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4352 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4353 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4355 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4356 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4357 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4358 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4360 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4361 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4362 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4363 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4366 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4367 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4368 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4369 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4371 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4372 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4373 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4377 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4378 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4380 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4381 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4383 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4384 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4385 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4386 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4387 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4388 AMD_FMT_MOD_SET(DCC, 1) |
4389 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4390 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4391 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4392 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4394 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4395 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4396 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4397 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4398 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4399 AMD_FMT_MOD_SET(DCC, 1) |
4400 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4401 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4402 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4403 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4404 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4406 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4407 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4408 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4409 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4410 AMD_FMT_MOD_SET(PACKERS, pkrs));
4412 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4413 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4414 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4415 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4416 AMD_FMT_MOD_SET(PACKERS, pkrs));
4418 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4419 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4420 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4421 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4423 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4424 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4425 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
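/*
 * Worked example of the log2 derivations above (hypothetical config): a
 * GPU reporting num_pipes = 16 and num_pkrs = 8 in gb_addr_config yields
 * pipe_xor_bits = ilog2(16) = 4 and pkrs = ilog2(8) = 3, which then get
 * packed into every X-tiled modifier advertised for this family.
 */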
4429 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4431 uint64_t size = 0, capacity = 128;
4434 /* We have not hooked up any pre-GFX9 modifiers. */
4435 if (adev->family < AMDGPU_FAMILY_AI)
4438 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4440 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4441 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4442 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4443 return *mods ? 0 : -ENOMEM;
4446 switch (adev->family) {
4447 case AMDGPU_FAMILY_AI:
4448 case AMDGPU_FAMILY_RV:
4449 add_gfx9_modifiers(adev, mods, &size, &capacity);
4451 case AMDGPU_FAMILY_NV:
4452 case AMDGPU_FAMILY_VGH:
4453 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4454 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4456 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4460 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4462 /* INVALID marks the end of the list. */
4463 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
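/*
 * Illustrative caller, a sketch rather than this driver's exact init
 * path: the INVALID-terminated list built above is what DRM core expects
 * as the format_modifiers argument of drm_universal_plane_init(). The
 * funcs/formats/num_formats parameters are hypothetical here.
 */
static int __maybe_unused example_init_plane(struct amdgpu_device *adev,
					     struct drm_plane *plane,
					     const struct drm_plane_funcs *funcs,
					     const uint32_t *formats,
					     unsigned int num_formats)
{
	uint64_t *modifiers = NULL;
	int res = get_plane_modifiers(adev, plane->type, &modifiers);

	if (res)
		return res;

	/* DRM copies the modifier list, so it can be freed right away. */
	res = drm_universal_plane_init(adev_to_drm(adev), plane, 0xff, funcs,
				       formats, num_formats, modifiers,
				       plane->type, NULL);
	kfree(modifiers);
	return res;
}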
4472 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4473 const struct amdgpu_framebuffer *afb,
4474 const enum surface_pixel_format format,
4475 const enum dc_rotation_angle rotation,
4476 const struct plane_size *plane_size,
4477 union dc_tiling_info *tiling_info,
4478 struct dc_plane_dcc_param *dcc,
4479 struct dc_plane_address *address,
4480 const bool force_disable_dcc)
4482 const uint64_t modifier = afb->base.modifier;
4485 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4486 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4488 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4489 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4492 dcc->meta_pitch = afb->base.pitches[1];
4493 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4495 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4496 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4499 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
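/*
 * Worked example of the address split above (hypothetical numbers): with
 * the DCC metadata plane at dcc_address = 0x123456000, upper_32_bits()
 * yields meta_addr.high_part = 0x1 and lower_32_bits() yields
 * meta_addr.low_part = 0x23456000.
 */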
4507 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4508 const struct amdgpu_framebuffer *afb,
4509 const enum surface_pixel_format format,
4510 const enum dc_rotation_angle rotation,
4511 const uint64_t tiling_flags,
4512 union dc_tiling_info *tiling_info,
4513 struct plane_size *plane_size,
4514 struct dc_plane_dcc_param *dcc,
4515 struct dc_plane_address *address,
4517 bool force_disable_dcc)
4519 const struct drm_framebuffer *fb = &afb->base;
4522 memset(tiling_info, 0, sizeof(*tiling_info));
4523 memset(plane_size, 0, sizeof(*plane_size));
4524 memset(dcc, 0, sizeof(*dcc));
4525 memset(address, 0, sizeof(*address));
4527 address->tmz_surface = tmz_surface;
4529 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4530 uint64_t addr = afb->address + fb->offsets[0];
4532 plane_size->surface_size.x = 0;
4533 plane_size->surface_size.y = 0;
4534 plane_size->surface_size.width = fb->width;
4535 plane_size->surface_size.height = fb->height;
4536 plane_size->surface_pitch =
4537 fb->pitches[0] / fb->format->cpp[0];
4539 address->type = PLN_ADDR_TYPE_GRAPHICS;
4540 address->grph.addr.low_part = lower_32_bits(addr);
4541 address->grph.addr.high_part = upper_32_bits(addr);
4542 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4543 uint64_t luma_addr = afb->address + fb->offsets[0];
4544 uint64_t chroma_addr = afb->address + fb->offsets[1];
4546 plane_size->surface_size.x = 0;
4547 plane_size->surface_size.y = 0;
4548 plane_size->surface_size.width = fb->width;
4549 plane_size->surface_size.height = fb->height;
4550 plane_size->surface_pitch =
4551 fb->pitches[0] / fb->format->cpp[0];
4553 plane_size->chroma_size.x = 0;
4554 plane_size->chroma_size.y = 0;
4555 /* TODO: set these based on surface format */
4556 plane_size->chroma_size.width = fb->width / 2;
4557 plane_size->chroma_size.height = fb->height / 2;
4559 plane_size->chroma_pitch =
4560 fb->pitches[1] / fb->format->cpp[1];
4562 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4563 address->video_progressive.luma_addr.low_part =
4564 lower_32_bits(luma_addr);
4565 address->video_progressive.luma_addr.high_part =
4566 upper_32_bits(luma_addr);
4567 address->video_progressive.chroma_addr.low_part =
4568 lower_32_bits(chroma_addr);
4569 address->video_progressive.chroma_addr.high_part =
4570 upper_32_bits(chroma_addr);
4573 if (adev->family >= AMDGPU_FAMILY_AI) {
4574 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4575 rotation, plane_size,
4582 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
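/*
 * Worked example of the pitch math above, assuming a tightly packed
 * 1920x1080 NV12 framebuffer: cpp[0] = 1 and cpp[1] = 2, so
 * surface_pitch = pitches[0] / 1 = 1920 luma samples per row and
 * chroma_pitch = pitches[1] / 2 = 960 CbCr sample pairs, with the
 * chroma plane sized 960x540 (half the luma dimensions).
 */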
4589 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4590 bool *per_pixel_alpha, bool *global_alpha,
4591 int *global_alpha_value)
4593 *per_pixel_alpha = false;
4594 *global_alpha = false;
4595 *global_alpha_value = 0xff;
4597 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4600 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4601 static const uint32_t alpha_formats[] = {
4602 DRM_FORMAT_ARGB8888,
4603 DRM_FORMAT_RGBA8888,
4604 DRM_FORMAT_ABGR8888,
4606 uint32_t format = plane_state->fb->format->format;
4609 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4610 if (format == alpha_formats[i]) {
4611 *per_pixel_alpha = true;
4617 if (plane_state->alpha < 0xffff) {
4618 *global_alpha = true;
4619 *global_alpha_value = plane_state->alpha >> 8;
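/*
 * Example of the conversion above: DRM plane alpha is 16-bit, so a
 * half-opaque overlay (alpha = 0x8000) maps to global_alpha_value =
 * 0x8000 >> 8 = 0x80 in DC's 8-bit range.
 */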
4624 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4625 const enum surface_pixel_format format,
4626 enum dc_color_space *color_space)
4630 *color_space = COLOR_SPACE_SRGB;
4632 /* DRM color properties only affect non-RGB formats. */
4633 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4636 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4638 switch (plane_state->color_encoding) {
4639 case DRM_COLOR_YCBCR_BT601:
4641 *color_space = COLOR_SPACE_YCBCR601;
4643 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4646 case DRM_COLOR_YCBCR_BT709:
4648 *color_space = COLOR_SPACE_YCBCR709;
4650 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4653 case DRM_COLOR_YCBCR_BT2020:
4655 *color_space = COLOR_SPACE_2020_YCBCR;
4668 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4669 const struct drm_plane_state *plane_state,
4670 const uint64_t tiling_flags,
4671 struct dc_plane_info *plane_info,
4672 struct dc_plane_address *address,
4674 bool force_disable_dcc)
4676 const struct drm_framebuffer *fb = plane_state->fb;
4677 const struct amdgpu_framebuffer *afb =
4678 to_amdgpu_framebuffer(plane_state->fb);
4679 struct drm_format_name_buf format_name;
4682 memset(plane_info, 0, sizeof(*plane_info));
4684 switch (fb->format->format) {
4686 plane_info->format =
4687 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4689 case DRM_FORMAT_RGB565:
4690 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4692 case DRM_FORMAT_XRGB8888:
4693 case DRM_FORMAT_ARGB8888:
4694 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4696 case DRM_FORMAT_XRGB2101010:
4697 case DRM_FORMAT_ARGB2101010:
4698 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4700 case DRM_FORMAT_XBGR2101010:
4701 case DRM_FORMAT_ABGR2101010:
4702 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4704 case DRM_FORMAT_XBGR8888:
4705 case DRM_FORMAT_ABGR8888:
4706 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4708 case DRM_FORMAT_NV21:
4709 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4711 case DRM_FORMAT_NV12:
4712 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4714 case DRM_FORMAT_P010:
4715 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4717 case DRM_FORMAT_XRGB16161616F:
4718 case DRM_FORMAT_ARGB16161616F:
4719 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4721 case DRM_FORMAT_XBGR16161616F:
4722 case DRM_FORMAT_ABGR16161616F:
4723 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4727 "Unsupported screen format %s\n",
4728 drm_get_format_name(fb->format->format, &format_name));
4732 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4733 case DRM_MODE_ROTATE_0:
4734 plane_info->rotation = ROTATION_ANGLE_0;
4736 case DRM_MODE_ROTATE_90:
4737 plane_info->rotation = ROTATION_ANGLE_90;
4739 case DRM_MODE_ROTATE_180:
4740 plane_info->rotation = ROTATION_ANGLE_180;
4742 case DRM_MODE_ROTATE_270:
4743 plane_info->rotation = ROTATION_ANGLE_270;
4746 plane_info->rotation = ROTATION_ANGLE_0;
4750 plane_info->visible = true;
4751 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4753 plane_info->layer_index = 0;
4755 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4756 &plane_info->color_space);
4760 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4761 plane_info->rotation, tiling_flags,
4762 &plane_info->tiling_info,
4763 &plane_info->plane_size,
4764 &plane_info->dcc, address, tmz_surface,
4769 fill_blending_from_plane_state(
4770 plane_state, &plane_info->per_pixel_alpha,
4771 &plane_info->global_alpha, &plane_info->global_alpha_value);
4776 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4777 struct dc_plane_state *dc_plane_state,
4778 struct drm_plane_state *plane_state,
4779 struct drm_crtc_state *crtc_state)
4781 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4782 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4783 struct dc_scaling_info scaling_info;
4784 struct dc_plane_info plane_info;
4786 bool force_disable_dcc = false;
4788 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4792 dc_plane_state->src_rect = scaling_info.src_rect;
4793 dc_plane_state->dst_rect = scaling_info.dst_rect;
4794 dc_plane_state->clip_rect = scaling_info.clip_rect;
4795 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4797 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4798 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4801 &dc_plane_state->address,
4807 dc_plane_state->format = plane_info.format;
4808 dc_plane_state->color_space = plane_info.color_space;
4810 dc_plane_state->plane_size = plane_info.plane_size;
4811 dc_plane_state->rotation = plane_info.rotation;
4812 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4813 dc_plane_state->stereo_format = plane_info.stereo_format;
4814 dc_plane_state->tiling_info = plane_info.tiling_info;
4815 dc_plane_state->visible = plane_info.visible;
4816 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4817 dc_plane_state->global_alpha = plane_info.global_alpha;
4818 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4819 dc_plane_state->dcc = plane_info.dcc;
4820 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4821 dc_plane_state->flip_int_enabled = true;
4824 * Always set the input transfer function, since plane state is refreshed every time.
4827 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4834 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4835 const struct dm_connector_state *dm_state,
4836 struct dc_stream_state *stream)
4838 enum amdgpu_rmx_type rmx_type;
4840 struct rect src = { 0 }; /* viewport in composition space */
4841 struct rect dst = { 0 }; /* stream addressable area */
4843 /* no mode. nothing to be done */
4847 /* Full screen scaling by default */
4848 src.width = mode->hdisplay;
4849 src.height = mode->vdisplay;
4850 dst.width = stream->timing.h_addressable;
4851 dst.height = stream->timing.v_addressable;
4854 rmx_type = dm_state->scaling;
4855 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4856 if (src.width * dst.height <
4857 src.height * dst.width) {
4858 /* height needs less upscaling/more downscaling */
4859 dst.width = src.width *
4860 dst.height / src.height;
4862 /* width needs less upscaling/more downscaling */
4863 dst.height = src.height *
4864 dst.width / src.width;
4866 } else if (rmx_type == RMX_CENTER) {
4870 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4871 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4873 if (dm_state->underscan_enable) {
4874 dst.x += dm_state->underscan_hborder / 2;
4875 dst.y += dm_state->underscan_vborder / 2;
4876 dst.width -= dm_state->underscan_hborder;
4877 dst.height -= dm_state->underscan_vborder;
4884 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4885 dst.x, dst.y, dst.width, dst.height);
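/*
 * Worked example of the RMX_ASPECT math above (hypothetical mode): a
 * 1280x1024 source on a 1920x1080 panel gives src.width * dst.height =
 * 1382400 < src.height * dst.width = 1966080, so the height needs less
 * upscaling and dst.width becomes 1280 * 1080 / 1024 = 1350. Centering
 * then yields dst.x = (1920 - 1350) / 2 = 285 and dst.y = 0.
 */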
4889 static enum dc_color_depth
4890 convert_color_depth_from_display_info(const struct drm_connector *connector,
4891 bool is_y420, int requested_bpc)
4898 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4899 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4901 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4903 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4906 bpc = (uint8_t)connector->display_info.bpc;
4907 /* Assume 8 bpc by default if no bpc is specified. */
4908 bpc = bpc ? bpc : 8;
4911 if (requested_bpc > 0) {
4913 * Cap display bpc based on the user requested value.
4915 * The value for state->max_bpc may not be correctly updated
4916 * depending on when the connector gets added to the state
4917 * or if this was called outside of atomic check, so it
4918 * can't be used directly.
4920 bpc = min_t(u8, bpc, requested_bpc);
4922 /* Round down to the nearest even number. */
4923 bpc = bpc - (bpc & 1);
4929 * Temporary workaround: DRM doesn't parse color depth for
4930 * EDID revisions before 1.4.
4931 * TODO: fix EDID parsing
4933 return COLOR_DEPTH_888;
4935 return COLOR_DEPTH_666;
4937 return COLOR_DEPTH_888;
4939 return COLOR_DEPTH_101010;
4941 return COLOR_DEPTH_121212;
4943 return COLOR_DEPTH_141414;
4945 return COLOR_DEPTH_161616;
4947 return COLOR_DEPTH_UNDEFINED;
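/*
 * Example of the capping above: a panel reporting bpc = 10 with a
 * connector state of max_requested_bpc = 8 yields min(10, 8) = 8, which
 * maps to COLOR_DEPTH_888; an odd intermediate value such as 9 would
 * first be rounded down to 8 by the "bpc & 1" step.
 */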
4951 static enum dc_aspect_ratio
4952 get_aspect_ratio(const struct drm_display_mode *mode_in)
4954 /* 1-1 mapping, since both enums follow the HDMI spec. */
4955 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4958 static enum dc_color_space
4959 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4961 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4963 switch (dc_crtc_timing->pixel_encoding) {
4964 case PIXEL_ENCODING_YCBCR422:
4965 case PIXEL_ENCODING_YCBCR444:
4966 case PIXEL_ENCODING_YCBCR420:
4969 * 27030khz is the separation point between HDTV and SDTV
4970 * according to the HDMI spec; we use YCbCr709 and YCbCr601 respectively.
4973 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4974 if (dc_crtc_timing->flags.Y_ONLY)
4976 COLOR_SPACE_YCBCR709_LIMITED;
4978 color_space = COLOR_SPACE_YCBCR709;
4980 if (dc_crtc_timing->flags.Y_ONLY)
4982 COLOR_SPACE_YCBCR601_LIMITED;
4984 color_space = COLOR_SPACE_YCBCR601;
4989 case PIXEL_ENCODING_RGB:
4990 color_space = COLOR_SPACE_SRGB;
5001 static bool adjust_colour_depth_from_display_info(
5002 struct dc_crtc_timing *timing_out,
5003 const struct drm_display_info *info)
5005 enum dc_color_depth depth = timing_out->display_color_depth;
5008 normalized_clk = timing_out->pix_clk_100hz / 10;
5009 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5010 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5011 normalized_clk /= 2;
5012 /* Adjust the pixel clock per the HDMI spec, based on colour depth. */
5014 case COLOR_DEPTH_888:
5016 case COLOR_DEPTH_101010:
5017 normalized_clk = (normalized_clk * 30) / 24;
5019 case COLOR_DEPTH_121212:
5020 normalized_clk = (normalized_clk * 36) / 24;
5022 case COLOR_DEPTH_161616:
5023 normalized_clk = (normalized_clk * 48) / 24;
5026 /* The above depths are the only ones valid for HDMI. */
5029 if (normalized_clk <= info->max_tmds_clock) {
5030 timing_out->display_color_depth = depth;
5033 } while (--depth > COLOR_DEPTH_666);
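/*
 * Worked example of the TMDS budget check above: 3840x2160@60 RGB has a
 * 594000 kHz pixel clock. At 10 bpc the normalized clock becomes
 * 594000 * 30 / 24 = 742500 kHz, which exceeds a 600000 kHz
 * max_tmds_clock, so the loop retries at 8 bpc (594000 kHz fits). With
 * YCbCr 4:2:0 the starting clock halves to 297000 kHz and 10 bpc fits.
 */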
5037 static void fill_stream_properties_from_drm_display_mode(
5038 struct dc_stream_state *stream,
5039 const struct drm_display_mode *mode_in,
5040 const struct drm_connector *connector,
5041 const struct drm_connector_state *connector_state,
5042 const struct dc_stream_state *old_stream,
5045 struct dc_crtc_timing *timing_out = &stream->timing;
5046 const struct drm_display_info *info = &connector->display_info;
5047 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5048 struct hdmi_vendor_infoframe hv_frame;
5049 struct hdmi_avi_infoframe avi_frame;
5051 memset(&hv_frame, 0, sizeof(hv_frame));
5052 memset(&avi_frame, 0, sizeof(avi_frame));
5054 timing_out->h_border_left = 0;
5055 timing_out->h_border_right = 0;
5056 timing_out->v_border_top = 0;
5057 timing_out->v_border_bottom = 0;
5058 /* TODO: un-hardcode */
5059 if (drm_mode_is_420_only(info, mode_in)
5060 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5061 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5062 else if (drm_mode_is_420_also(info, mode_in)
5063 && aconnector->force_yuv420_output)
5064 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5065 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5066 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5067 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5069 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5071 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5072 timing_out->display_color_depth = convert_color_depth_from_display_info(
5074 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5076 timing_out->scan_type = SCANNING_TYPE_NODATA;
5077 timing_out->hdmi_vic = 0;
5080 timing_out->vic = old_stream->timing.vic;
5081 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5082 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5084 timing_out->vic = drm_match_cea_mode(mode_in);
5085 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5086 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5087 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5088 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5091 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5092 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5093 timing_out->vic = avi_frame.video_code;
5094 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5095 timing_out->hdmi_vic = hv_frame.vic;
5098 timing_out->h_addressable = mode_in->hdisplay;
5099 timing_out->h_total = mode_in->htotal;
5100 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5101 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5102 timing_out->v_total = mode_in->vtotal;
5103 timing_out->v_addressable = mode_in->vdisplay;
5104 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5105 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5106 timing_out->pix_clk_100hz = mode_in->clock * 10;
5108 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5110 stream->output_color_space = get_output_color_space(timing_out);
5112 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5113 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5114 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5115 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5116 drm_mode_is_420_also(info, mode_in) &&
5117 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5118 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5119 adjust_colour_depth_from_display_info(timing_out, info);
5124 static void fill_audio_info(struct audio_info *audio_info,
5125 const struct drm_connector *drm_connector,
5126 const struct dc_sink *dc_sink)
5129 int cea_revision = 0;
5130 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5132 audio_info->manufacture_id = edid_caps->manufacturer_id;
5133 audio_info->product_id = edid_caps->product_id;
5135 cea_revision = drm_connector->display_info.cea_rev;
5137 strscpy(audio_info->display_name,
5138 edid_caps->display_name,
5139 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5141 if (cea_revision >= 3) {
5142 audio_info->mode_count = edid_caps->audio_mode_count;
5144 for (i = 0; i < audio_info->mode_count; ++i) {
5145 audio_info->modes[i].format_code =
5146 (enum audio_format_code)
5147 (edid_caps->audio_modes[i].format_code);
5148 audio_info->modes[i].channel_count =
5149 edid_caps->audio_modes[i].channel_count;
5150 audio_info->modes[i].sample_rates.all =
5151 edid_caps->audio_modes[i].sample_rate;
5152 audio_info->modes[i].sample_size =
5153 edid_caps->audio_modes[i].sample_size;
5157 audio_info->flags.all = edid_caps->speaker_flags;
5159 /* TODO: We only check for the progressive mode, check for interlace mode too */
5160 if (drm_connector->latency_present[0]) {
5161 audio_info->video_latency = drm_connector->video_latency[0];
5162 audio_info->audio_latency = drm_connector->audio_latency[0];
5165 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5170 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5171 struct drm_display_mode *dst_mode)
5173 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5174 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5175 dst_mode->crtc_clock = src_mode->crtc_clock;
5176 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5177 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5178 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5179 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5180 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5181 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5182 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5183 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5184 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5185 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5186 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5190 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5191 const struct drm_display_mode *native_mode,
5194 if (scale_enabled) {
5195 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5196 } else if (native_mode->clock == drm_mode->clock &&
5197 native_mode->htotal == drm_mode->htotal &&
5198 native_mode->vtotal == drm_mode->vtotal) {
5199 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5201 /* neither scaling nor an amdgpu-inserted mode: no need to patch */
5205 static struct dc_sink *
5206 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5208 struct dc_sink_init_data sink_init_data = { 0 };
5209 struct dc_sink *sink = NULL;
5210 sink_init_data.link = aconnector->dc_link;
5211 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5213 sink = dc_sink_create(&sink_init_data);
5215 DRM_ERROR("Failed to create sink!\n");
5218 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5223 static void set_multisync_trigger_params(
5224 struct dc_stream_state *stream)
5226 if (stream->triggered_crtc_reset.enabled) {
5227 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5228 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5232 static void set_master_stream(struct dc_stream_state *stream_set[],
5235 int j, highest_rfr = 0, master_stream = 0;
5237 for (j = 0; j < stream_count; j++) {
5238 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5239 int refresh_rate = 0;
5241 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5242 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5243 if (refresh_rate > highest_rfr) {
5244 highest_rfr = refresh_rate;
5249 for (j = 0; j < stream_count; j++) {
5251 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
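/*
 * Worked example of the refresh-rate formula above: a 1080p60 stream has
 * pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125, so
 * (1485000 * 100) / (2200 * 1125) = 60 Hz. The stream with the highest
 * such rate becomes the sync master for the triggered CRTC resets.
 */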
5255 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5259 if (context->stream_count < 2)
5261 for (i = 0; i < context->stream_count ; i++) {
5262 if (!context->streams[i])
5265 * TODO: add a function to read AMD VSDB bits and set
5266 * crtc_sync_master.multi_sync_enabled flag
5267 * For now it's set to false
5269 set_multisync_trigger_params(context->streams[i]);
5271 set_master_stream(context->streams, context->stream_count);
5274 static struct drm_display_mode *
5275 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5276 bool use_probed_modes)
5278 struct drm_display_mode *m, *m_pref = NULL;
5279 u16 current_refresh, highest_refresh;
5280 struct list_head *list_head = use_probed_modes ?
5281 &aconnector->base.probed_modes :
5282 &aconnector->base.modes;
5284 if (aconnector->freesync_vid_base.clock != 0)
5285 return &aconnector->freesync_vid_base;
5287 /* Find the preferred mode */
5288 list_for_each_entry (m, list_head, head) {
5289 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5296 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
5297 m_pref = list_first_entry_or_null(
5298 &aconnector->base.modes, struct drm_display_mode, head);
5300 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5305 highest_refresh = drm_mode_vrefresh(m_pref);
5308 * Find the mode with the highest refresh rate at the same resolution.
5309 * For some monitors, the preferred mode is not the mode with the
5310 * highest supported refresh rate.
5312 list_for_each_entry (m, list_head, head) {
5313 current_refresh = drm_mode_vrefresh(m);
5315 if (m->hdisplay == m_pref->hdisplay &&
5316 m->vdisplay == m_pref->vdisplay &&
5317 highest_refresh < current_refresh) {
5318 highest_refresh = current_refresh;
5323 aconnector->freesync_vid_base = *m_pref;
5327 static bool is_freesync_video_mode(struct drm_display_mode *mode,
5328 struct amdgpu_dm_connector *aconnector)
5330 struct drm_display_mode *high_mode;
5333 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5334 if (!high_mode || !mode)
5337 timing_diff = high_mode->vtotal - mode->vtotal;
5339 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5340 high_mode->hdisplay != mode->hdisplay ||
5341 high_mode->vdisplay != mode->vdisplay ||
5342 high_mode->hsync_start != mode->hsync_start ||
5343 high_mode->hsync_end != mode->hsync_end ||
5344 high_mode->htotal != mode->htotal ||
5345 high_mode->hskew != mode->hskew ||
5346 high_mode->vscan != mode->vscan ||
5347 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5348 high_mode->vsync_end - mode->vsync_end != timing_diff)
5354 static struct dc_stream_state *
5355 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5356 const struct drm_display_mode *drm_mode,
5357 const struct dm_connector_state *dm_state,
5358 const struct dc_stream_state *old_stream,
5361 struct drm_display_mode *preferred_mode = NULL;
5362 struct drm_connector *drm_connector;
5363 const struct drm_connector_state *con_state =
5364 dm_state ? &dm_state->base : NULL;
5365 struct dc_stream_state *stream = NULL;
5366 struct drm_display_mode mode = *drm_mode;
5367 struct drm_display_mode saved_mode;
5368 struct drm_display_mode *freesync_mode = NULL;
5369 bool native_mode_found = false;
5370 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5372 int preferred_refresh = 0;
5373 #if defined(CONFIG_DRM_AMD_DC_DCN)
5374 struct dsc_dec_dpcd_caps dsc_caps;
5375 uint32_t link_bandwidth_kbps;
5377 struct dc_sink *sink = NULL;
5379 memset(&saved_mode, 0, sizeof(saved_mode));
5381 if (aconnector == NULL) {
5382 DRM_ERROR("aconnector is NULL!\n");
5386 drm_connector = &aconnector->base;
5388 if (!aconnector->dc_sink) {
5389 sink = create_fake_sink(aconnector);
5393 sink = aconnector->dc_sink;
5394 dc_sink_retain(sink);
5397 stream = dc_create_stream_for_sink(sink);
5399 if (stream == NULL) {
5400 DRM_ERROR("Failed to create stream for sink!\n");
5404 stream->dm_stream_context = aconnector;
5406 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5407 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5409 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5410 /* Search for preferred mode */
5411 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5412 native_mode_found = true;
5416 if (!native_mode_found)
5417 preferred_mode = list_first_entry_or_null(
5418 &aconnector->base.modes,
5419 struct drm_display_mode,
5422 mode_refresh = drm_mode_vrefresh(&mode);
5424 if (preferred_mode == NULL) {
5426 * This may not be an error: the use case is when we have no
5427 * usermode calls to reset and set mode upon hotplug. In this
5428 * case, we call set mode ourselves to restore the previous mode,
5429 * and the modelist may not be filled in yet.
5431 DRM_DEBUG_DRIVER("No preferred mode found\n");
5433 recalculate_timing |= amdgpu_freesync_vid_mode &&
5434 is_freesync_video_mode(&mode, aconnector);
5435 if (recalculate_timing) {
5436 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5438 mode = *freesync_mode;
5440 decide_crtc_timing_for_drm_display_mode(
5441 &mode, preferred_mode,
5442 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5445 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5448 if (recalculate_timing)
5449 drm_mode_set_crtcinfo(&saved_mode, 0);
5451 drm_mode_set_crtcinfo(&mode, 0);
5454 * If scaling is enabled and the refresh rate didn't change,
5455 * we copy the vic and polarities of the old timings.
5457 if (!recalculate_timing || mode_refresh != preferred_refresh)
5458 fill_stream_properties_from_drm_display_mode(
5459 stream, &mode, &aconnector->base, con_state, NULL,
5462 fill_stream_properties_from_drm_display_mode(
5463 stream, &mode, &aconnector->base, con_state, old_stream,
5466 stream->timing.flags.DSC = 0;
5468 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5469 #if defined(CONFIG_DRM_AMD_DC_DCN)
5470 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5471 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5472 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5474 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5475 dc_link_get_link_cap(aconnector->dc_link));
5477 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5478 /* Set DSC policy according to dsc_clock_en */
5479 dc_dsc_policy_set_enable_dsc_when_not_needed(
5480 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5482 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5484 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5486 link_bandwidth_kbps,
5488 &stream->timing.dsc_cfg))
5489 stream->timing.flags.DSC = 1;
5490 /* Overwrite the stream flag if DSC is enabled through debugfs */
5491 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5492 stream->timing.flags.DSC = 1;
5494 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5495 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5497 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5498 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5500 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5501 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5506 update_stream_scaling_settings(&mode, dm_state, stream);
5509 &stream->audio_info,
5513 update_stream_signal(stream, sink);
5515 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5516 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5518 if (stream->link->psr_settings.psr_feature_enabled) {
5520 // Decide whether the stream supports VSC SDP colorimetry
5521 // before building the VSC info packet.
5523 stream->use_vsc_sdp_for_colorimetry = false;
5524 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5525 stream->use_vsc_sdp_for_colorimetry =
5526 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5528 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5529 stream->use_vsc_sdp_for_colorimetry = true;
5531 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5534 dc_sink_release(sink);
5539 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5541 drm_crtc_cleanup(crtc);
5545 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5546 struct drm_crtc_state *state)
5548 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5550 /* TODO: destroy dc_stream objects once the stream object is flattened */
5552 dc_stream_release(cur->stream);
5555 __drm_atomic_helper_crtc_destroy_state(state);
5561 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5563 struct dm_crtc_state *state;
5566 dm_crtc_destroy_state(crtc, crtc->state);
5568 state = kzalloc(sizeof(*state), GFP_KERNEL);
5569 if (WARN_ON(!state))
5572 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5575 static struct drm_crtc_state *
5576 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5578 struct dm_crtc_state *state, *cur;
5580 cur = to_dm_crtc_state(crtc->state);
5582 if (WARN_ON(!crtc->state))
5585 state = kzalloc(sizeof(*state), GFP_KERNEL);
5589 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5592 state->stream = cur->stream;
5593 dc_stream_retain(state->stream);
5596 state->active_planes = cur->active_planes;
5597 state->vrr_infopacket = cur->vrr_infopacket;
5598 state->abm_level = cur->abm_level;
5599 state->vrr_supported = cur->vrr_supported;
5600 state->freesync_config = cur->freesync_config;
5601 state->cm_has_degamma = cur->cm_has_degamma;
5602 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5603 /* TODO: duplicate dc_stream once the stream object is flattened */
5605 return &state->base;
5608 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5609 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5611 crtc_debugfs_init(crtc);
5617 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5619 enum dc_irq_source irq_source;
5620 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5621 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5624 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5626 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5628 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5629 acrtc->crtc_id, enable ? "en" : "dis", rc);
5633 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5635 enum dc_irq_source irq_source;
5636 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5637 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5638 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5639 #if defined(CONFIG_DRM_AMD_DC_DCN)
5640 struct amdgpu_display_manager *dm = &adev->dm;
5641 unsigned long flags;
5646 /* vblank irq on -> Only need vupdate irq in vrr mode */
5647 if (amdgpu_dm_vrr_active(acrtc_state))
5648 rc = dm_set_vupdate_irq(crtc, true);
5650 /* vblank irq off -> vupdate irq off */
5651 rc = dm_set_vupdate_irq(crtc, false);
5657 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5659 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5662 if (amdgpu_in_reset(adev))
5665 #if defined(CONFIG_DRM_AMD_DC_DCN)
5666 spin_lock_irqsave(&dm->vblank_lock, flags);
5667 dm->vblank_workqueue->dm = dm;
5668 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5669 dm->vblank_workqueue->enable = enable;
5670 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5671 schedule_work(&dm->vblank_workqueue->mall_work);
5677 static int dm_enable_vblank(struct drm_crtc *crtc)
5679 return dm_set_vblank(crtc, true);
5682 static void dm_disable_vblank(struct drm_crtc *crtc)
5684 dm_set_vblank(crtc, false);
5687 /* Only the options currently available to the driver are implemented. */
5688 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5689 .reset = dm_crtc_reset_state,
5690 .destroy = amdgpu_dm_crtc_destroy,
5691 .set_config = drm_atomic_helper_set_config,
5692 .page_flip = drm_atomic_helper_page_flip,
5693 .atomic_duplicate_state = dm_crtc_duplicate_state,
5694 .atomic_destroy_state = dm_crtc_destroy_state,
5695 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5696 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5697 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5698 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5699 .enable_vblank = dm_enable_vblank,
5700 .disable_vblank = dm_disable_vblank,
5701 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5702 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5703 .late_register = amdgpu_dm_crtc_late_register,
5707 static enum drm_connector_status
5708 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5711 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5715 * 1. This interface is NOT called in context of HPD irq.
5716 * 2. This interface *is called* in context of a user-mode ioctl, which
5717 * makes it a bad place for *any* MST-related activity.
5720 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5721 !aconnector->fake_enable)
5722 connected = (aconnector->dc_sink != NULL);
5724 connected = (aconnector->base.force == DRM_FORCE_ON);
5726 update_subconnector_property(aconnector);
5728 return (connected ? connector_status_connected :
5729 connector_status_disconnected);
5732 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5733 struct drm_connector_state *connector_state,
5734 struct drm_property *property,
5737 struct drm_device *dev = connector->dev;
5738 struct amdgpu_device *adev = drm_to_adev(dev);
5739 struct dm_connector_state *dm_old_state =
5740 to_dm_connector_state(connector->state);
5741 struct dm_connector_state *dm_new_state =
5742 to_dm_connector_state(connector_state);
5746 if (property == dev->mode_config.scaling_mode_property) {
5747 enum amdgpu_rmx_type rmx_type;
5750 case DRM_MODE_SCALE_CENTER:
5751 rmx_type = RMX_CENTER;
5753 case DRM_MODE_SCALE_ASPECT:
5754 rmx_type = RMX_ASPECT;
5756 case DRM_MODE_SCALE_FULLSCREEN:
5757 rmx_type = RMX_FULL;
5759 case DRM_MODE_SCALE_NONE:
5765 if (dm_old_state->scaling == rmx_type)
5768 dm_new_state->scaling = rmx_type;
5770 } else if (property == adev->mode_info.underscan_hborder_property) {
5771 dm_new_state->underscan_hborder = val;
5773 } else if (property == adev->mode_info.underscan_vborder_property) {
5774 dm_new_state->underscan_vborder = val;
5776 } else if (property == adev->mode_info.underscan_property) {
5777 dm_new_state->underscan_enable = val;
5779 } else if (property == adev->mode_info.abm_level_property) {
5780 dm_new_state->abm_level = val;
5787 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5788 const struct drm_connector_state *state,
5789 struct drm_property *property,
5792 struct drm_device *dev = connector->dev;
5793 struct amdgpu_device *adev = drm_to_adev(dev);
5794 struct dm_connector_state *dm_state =
5795 to_dm_connector_state(state);
5798 if (property == dev->mode_config.scaling_mode_property) {
5799 switch (dm_state->scaling) {
5801 *val = DRM_MODE_SCALE_CENTER;
5804 *val = DRM_MODE_SCALE_ASPECT;
5807 *val = DRM_MODE_SCALE_FULLSCREEN;
5811 *val = DRM_MODE_SCALE_NONE;
5815 } else if (property == adev->mode_info.underscan_hborder_property) {
5816 *val = dm_state->underscan_hborder;
5818 } else if (property == adev->mode_info.underscan_vborder_property) {
5819 *val = dm_state->underscan_vborder;
5821 } else if (property == adev->mode_info.underscan_property) {
5822 *val = dm_state->underscan_enable;
5824 } else if (property == adev->mode_info.abm_level_property) {
5825 *val = dm_state->abm_level;
5832 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5834 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5836 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5839 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5841 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5842 const struct dc_link *link = aconnector->dc_link;
5843 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5844 struct amdgpu_display_manager *dm = &adev->dm;
5847 * Call only if mst_mgr was initialized before, since it's not done
5848 * for all connector types.
5850 if (aconnector->mst_mgr.dev)
5851 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5853 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5854 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5856 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5857 link->type != dc_connection_none &&
5858 dm->backlight_dev) {
5859 backlight_device_unregister(dm->backlight_dev);
5860 dm->backlight_dev = NULL;
5864 if (aconnector->dc_em_sink)
5865 dc_sink_release(aconnector->dc_em_sink);
5866 aconnector->dc_em_sink = NULL;
5867 if (aconnector->dc_sink)
5868 dc_sink_release(aconnector->dc_sink);
5869 aconnector->dc_sink = NULL;
5871 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5872 drm_connector_unregister(connector);
5873 drm_connector_cleanup(connector);
5874 if (aconnector->i2c) {
5875 i2c_del_adapter(&aconnector->i2c->base);
5876 kfree(aconnector->i2c);
5878 kfree(aconnector->dm_dp_aux.aux.name);
5883 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5885 struct dm_connector_state *state =
5886 to_dm_connector_state(connector->state);
5888 if (connector->state)
5889 __drm_atomic_helper_connector_destroy_state(connector->state);
5893 state = kzalloc(sizeof(*state), GFP_KERNEL);
5896 state->scaling = RMX_OFF;
5897 state->underscan_enable = false;
5898 state->underscan_hborder = 0;
5899 state->underscan_vborder = 0;
5900 state->base.max_requested_bpc = 8;
5901 state->vcpi_slots = 0;
5903 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5904 state->abm_level = amdgpu_dm_abm_level;
5906 __drm_atomic_helper_connector_reset(connector, &state->base);
5910 struct drm_connector_state *
5911 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5913 struct dm_connector_state *state =
5914 to_dm_connector_state(connector->state);
5916 struct dm_connector_state *new_state =
5917 kmemdup(state, sizeof(*state), GFP_KERNEL);
5922 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5924 new_state->freesync_capable = state->freesync_capable;
5925 new_state->abm_level = state->abm_level;
5926 new_state->scaling = state->scaling;
5927 new_state->underscan_enable = state->underscan_enable;
5928 new_state->underscan_hborder = state->underscan_hborder;
5929 new_state->underscan_vborder = state->underscan_vborder;
5930 new_state->vcpi_slots = state->vcpi_slots;
5931 new_state->pbn = state->pbn;
5932 return &new_state->base;
5936 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5938 struct amdgpu_dm_connector *amdgpu_dm_connector =
5939 to_amdgpu_dm_connector(connector);
5942 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5943 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5944 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5945 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5950 #if defined(CONFIG_DEBUG_FS)
5951 connector_debugfs_init(amdgpu_dm_connector);
5957 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5958 .reset = amdgpu_dm_connector_funcs_reset,
5959 .detect = amdgpu_dm_connector_detect,
5960 .fill_modes = drm_helper_probe_single_connector_modes,
5961 .destroy = amdgpu_dm_connector_destroy,
5962 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5963 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5964 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5965 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5966 .late_register = amdgpu_dm_connector_late_register,
5967 .early_unregister = amdgpu_dm_connector_unregister
5970 static int get_modes(struct drm_connector *connector)
5972 return amdgpu_dm_connector_get_modes(connector);
5975 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5977 struct dc_sink_init_data init_params = {
5978 .link = aconnector->dc_link,
5979 .sink_signal = SIGNAL_TYPE_VIRTUAL
5983 if (!aconnector->base.edid_blob_ptr) {
5984 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5985 aconnector->base.name);
5987 aconnector->base.force = DRM_FORCE_OFF;
5988 aconnector->base.override_edid = false;
5992 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5994 aconnector->edid = edid;
5996 aconnector->dc_em_sink = dc_link_add_remote_sink(
5997 aconnector->dc_link,
5999 (edid->extensions + 1) * EDID_LENGTH,
6002 if (aconnector->base.force == DRM_FORCE_ON) {
6003 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6004 aconnector->dc_link->local_sink :
6005 aconnector->dc_em_sink;
6006 dc_sink_retain(aconnector->dc_sink);
6010 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6012 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6015 * In case of a headless boot with force-on for a DP managed connector,
6016 * these settings have to be != 0 to get an initial modeset.
6018 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6019 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6020 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6024 aconnector->base.override_edid = true;
6025 create_eml_sink(aconnector);
6028 static struct dc_stream_state *
6029 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6030 const struct drm_display_mode *drm_mode,
6031 const struct dm_connector_state *dm_state,
6032 const struct dc_stream_state *old_stream)
6034 struct drm_connector *connector = &aconnector->base;
6035 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6036 struct dc_stream_state *stream;
6037 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6038 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6039 enum dc_status dc_result = DC_OK;
6042 stream = create_stream_for_sink(aconnector, drm_mode,
6043 dm_state, old_stream,
6045 if (stream == NULL) {
6046 DRM_ERROR("Failed to create stream for sink!\n");
6050 dc_result = dc_validate_stream(adev->dm.dc, stream);
6052 if (dc_result != DC_OK) {
6053 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6058 dc_status_to_str(dc_result));
6060 dc_stream_release(stream);
6062 requested_bpc -= 2; /* lower bpc to retry validation */
6065 } while (stream == NULL && requested_bpc >= 6);
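/*
 * Example of the retry ladder above: with max_requested_bpc = 10 the loop
 * tries validation at 10, then 8, then 6 bpc, keeping the first depth
 * that passes dc_validate_stream() (or giving up below 6).
 */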
6070 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6071 struct drm_display_mode *mode)
6073 int result = MODE_ERROR;
6074 struct dc_sink *dc_sink;
6075 /* TODO: Unhardcode stream count */
6076 struct dc_stream_state *stream;
6077 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6079 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6080 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6084 * Only run this the first time mode_valid is called, to initialize EDID management.
6087 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6088 !aconnector->dc_em_sink)
6089 handle_edid_mgmt(aconnector);
6091 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6093 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6094 aconnector->base.force != DRM_FORCE_ON) {
6095 DRM_ERROR("dc_sink is NULL!\n");
6099 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6101 dc_stream_release(stream);
6106 /* TODO: error handling */
6110 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6111 struct dc_info_packet *out)
6113 struct hdmi_drm_infoframe frame;
6114 unsigned char buf[30]; /* 26 + 4 */
6118 memset(out, 0, sizeof(*out));
6120 if (!state->hdr_output_metadata)
6123 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6127 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6131 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6135 /* Prepare the infopacket for DC. */
6136 switch (state->connector->connector_type) {
6137 case DRM_MODE_CONNECTOR_HDMIA:
6138 out->hb0 = 0x87; /* type */
6139 out->hb1 = 0x01; /* version */
6140 out->hb2 = 0x1A; /* length */
6141 out->sb[0] = buf[3]; /* checksum */
6145 case DRM_MODE_CONNECTOR_DisplayPort:
6146 case DRM_MODE_CONNECTOR_eDP:
6147 out->hb0 = 0x00; /* sdp id, zero */
6148 out->hb1 = 0x87; /* type */
6149 out->hb2 = 0x1D; /* payload len - 1 */
6150 out->hb3 = (0x13 << 2); /* sdp version */
6151 out->sb[0] = 0x01; /* version */
6152 out->sb[1] = 0x1A; /* length */
6160 memcpy(&out->sb[i], &buf[4], 26);
6163 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6164 sizeof(out->sb), false);
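/*
 * Resulting packet layout, as assembled above (header bytes differ per
 * sink type):
 *
 *   HDMI:   hb0 = 0x87 (type), hb1 = 0x01 (version), hb2 = 0x1A (length),
 *           sb[0] = checksum, sb[1..26] = static metadata payload
 *   DP/eDP: hb0 = 0x00 (SDP id), hb1 = 0x87 (type), hb2 = 0x1D,
 *           hb3 = 0x13 << 2 (SDP version), sb[0] = 0x01, sb[1] = 0x1A,
 *           sb[2..27] = static metadata payload
 */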
6170 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6171 const struct drm_connector_state *new_state)
6173 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6174 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6176 if (old_blob != new_blob) {
6177 if (old_blob && new_blob &&
6178 old_blob->length == new_blob->length)
6179 return memcmp(old_blob->data, new_blob->data,
6189 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6190 struct drm_atomic_state *state)
6192 struct drm_connector_state *new_con_state =
6193 drm_atomic_get_new_connector_state(state, conn);
6194 struct drm_connector_state *old_con_state =
6195 drm_atomic_get_old_connector_state(state, conn);
6196 struct drm_crtc *crtc = new_con_state->crtc;
6197 struct drm_crtc_state *new_crtc_state;
6200 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6205 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6206 struct dc_info_packet hdr_infopacket;
6208 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6212 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6213 if (IS_ERR(new_crtc_state))
6214 return PTR_ERR(new_crtc_state);
6217 * DC considers the stream backends changed if the
6218 * static metadata changes. Forcing the modeset also
6219 * gives a simple way for userspace to switch from
6220	 * 8bpc to 10bpc when setting the metadata to enter
	 * or exit HDR.
6223 * Changing the static metadata after it's been
6224 * set is permissible, however. So only force a
6225 * modeset if we're entering or exiting HDR.
6227 new_crtc_state->mode_changed =
6228 !old_con_state->hdr_output_metadata ||
6229 !new_con_state->hdr_output_metadata;
6235 static const struct drm_connector_helper_funcs
6236 amdgpu_dm_connector_helper_funcs = {
6238	 * If hotplugging a second, bigger display in FB console mode, bigger-resolution
6239	 * modes will be filtered out by drm_mode_validate_size(), and those modes
6240	 * are missing after the user starts lightdm. So we need to renew the modes
6241	 * list in the get_modes callback, not just return the modes count
6243 .get_modes = get_modes,
6244 .mode_valid = amdgpu_dm_connector_mode_valid,
6245 .atomic_check = amdgpu_dm_connector_atomic_check,
6248 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6252 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6254 struct drm_atomic_state *state = new_crtc_state->state;
6255 struct drm_plane *plane;
6258 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6259 struct drm_plane_state *new_plane_state;
6261 /* Cursor planes are "fake". */
6262 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6265 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6267 if (!new_plane_state) {
6269			 * The plane is enabled on the CRTC and hasn't changed
6270 * state. This means that it previously passed
6271 * validation and is therefore enabled.
6277 /* We need a framebuffer to be considered enabled. */
6278 num_active += (new_plane_state->fb != NULL);
6284 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6285 struct drm_crtc_state *new_crtc_state)
6287 struct dm_crtc_state *dm_new_crtc_state =
6288 to_dm_crtc_state(new_crtc_state);
6290 dm_new_crtc_state->active_planes = 0;
6292 if (!dm_new_crtc_state->stream)
6295 dm_new_crtc_state->active_planes =
6296 count_crtc_active_planes(new_crtc_state);
6299 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6300 struct drm_atomic_state *state)
6302 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6304 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6305 struct dc *dc = adev->dm.dc;
6306 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6309 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6311 dm_update_crtc_active_planes(crtc, crtc_state);
6313 if (unlikely(!dm_crtc_state->stream &&
6314 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6320 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6321 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6322 * planes are disabled, which is not supported by the hardware. And there is legacy
6323 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6325 if (crtc_state->enable &&
6326 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6327 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6331 /* In some use cases, like reset, no stream is attached */
6332 if (!dm_crtc_state->stream)
6335 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6338 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6342 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6343 const struct drm_display_mode *mode,
6344 struct drm_display_mode *adjusted_mode)
6349 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6350 .disable = dm_crtc_helper_disable,
6351 .atomic_check = dm_crtc_helper_atomic_check,
6352 .mode_fixup = dm_crtc_helper_mode_fixup,
6353 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6356 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6361 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6363 switch (display_color_depth) {
6364 case COLOR_DEPTH_666:
6366 case COLOR_DEPTH_888:
6368 case COLOR_DEPTH_101010:
6370 case COLOR_DEPTH_121212:
6372 case COLOR_DEPTH_141414:
6374 case COLOR_DEPTH_161616:
6382 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6383 struct drm_crtc_state *crtc_state,
6384 struct drm_connector_state *conn_state)
6386 struct drm_atomic_state *state = crtc_state->state;
6387 struct drm_connector *connector = conn_state->connector;
6388 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6389 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6390 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6391 struct drm_dp_mst_topology_mgr *mst_mgr;
6392 struct drm_dp_mst_port *mst_port;
6393 enum dc_color_depth color_depth;
6395 bool is_y420 = false;
6397 if (!aconnector->port || !aconnector->dc_sink)
6400 mst_port = aconnector->port;
6401 mst_mgr = &aconnector->mst_port->mst_mgr;
6403 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6406 if (!state->duplicated) {
6407 int max_bpc = conn_state->max_requested_bpc;
6408 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6409 aconnector->force_yuv420_output;
6410 color_depth = convert_color_depth_from_display_info(connector,
6413 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6414 clock = adjusted_mode->clock;
6415 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
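		/*
		 * drm_dp_calc_pbn_mode() converts bandwidth to PBN units of
		 * 54/64 MBps: roughly clock_khz * bpp / 8 * 64/54, plus a
		 * 0.6% margin. For example, a 148500 kHz mode at 24 bpp
		 * works out to a little over 530 PBN.
		 */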
6417 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6420 dm_new_connector_state->pbn,
6421 dm_mst_get_pbn_divider(aconnector->dc_link));
6422 if (dm_new_connector_state->vcpi_slots < 0) {
6423 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6424 return dm_new_connector_state->vcpi_slots;
6429 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6430 .disable = dm_encoder_helper_disable,
6431 .atomic_check = dm_encoder_helper_atomic_check
6434 #if defined(CONFIG_DRM_AMD_DC_DCN)
6435 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6436 struct dc_state *dc_state)
6438 struct dc_stream_state *stream = NULL;
6439 struct drm_connector *connector;
6440 struct drm_connector_state *new_con_state, *old_con_state;
6441 struct amdgpu_dm_connector *aconnector;
6442 struct dm_connector_state *dm_conn_state;
6443 int i, j, clock, bpp;
6444 int vcpi, pbn_div, pbn = 0;
6446 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6448 aconnector = to_amdgpu_dm_connector(connector);
6450 if (!aconnector->port)
6453 if (!new_con_state || !new_con_state->crtc)
6456 dm_conn_state = to_dm_connector_state(new_con_state);
6458 for (j = 0; j < dc_state->stream_count; j++) {
6459 stream = dc_state->streams[j];
6463 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6472 if (stream->timing.flags.DSC != 1) {
6473 drm_dp_mst_atomic_enable_dsc(state,
6481 pbn_div = dm_mst_get_pbn_divider(stream->link);
6482 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6483 clock = stream->timing.pix_clk_100hz / 10;
6484 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
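		/*
		 * With DSC enabled the helper treats bpp as being in units
		 * of 1/16 of a bit per pixel, matching dc's
		 * dsc_cfg.bits_per_pixel encoding.
		 */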
6485 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6492 dm_conn_state->pbn = pbn;
6493 dm_conn_state->vcpi_slots = vcpi;
6499 static void dm_drm_plane_reset(struct drm_plane *plane)
6501 struct dm_plane_state *amdgpu_state = NULL;
6504 plane->funcs->atomic_destroy_state(plane, plane->state);
6506 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6507 WARN_ON(amdgpu_state == NULL);
6510 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6513 static struct drm_plane_state *
6514 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6516 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6518 old_dm_plane_state = to_dm_plane_state(plane->state);
6519 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6520 if (!dm_plane_state)
6523 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6525 if (old_dm_plane_state->dc_state) {
6526 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6527 dc_plane_state_retain(dm_plane_state->dc_state);
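		/*
		 * dc plane states are reference-counted: the duplicated DRM
		 * state takes an extra reference on the same dc_state rather
		 * than deep-copying it.
		 */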
6530 return &dm_plane_state->base;
6533 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6534 struct drm_plane_state *state)
6536 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6538 if (dm_plane_state->dc_state)
6539 dc_plane_state_release(dm_plane_state->dc_state);
6541 drm_atomic_helper_plane_destroy_state(plane, state);
6544 static const struct drm_plane_funcs dm_plane_funcs = {
6545 .update_plane = drm_atomic_helper_update_plane,
6546 .disable_plane = drm_atomic_helper_disable_plane,
6547 .destroy = drm_primary_helper_destroy,
6548 .reset = dm_drm_plane_reset,
6549 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6550 .atomic_destroy_state = dm_drm_plane_destroy_state,
6551 .format_mod_supported = dm_plane_format_mod_supported,
6554 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6555 struct drm_plane_state *new_state)
6557 struct amdgpu_framebuffer *afb;
6558 struct drm_gem_object *obj;
6559 struct amdgpu_device *adev;
6560 struct amdgpu_bo *rbo;
6561 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6562 struct list_head list;
6563 struct ttm_validate_buffer tv;
6564 struct ww_acquire_ctx ticket;
6568 if (!new_state->fb) {
6569 DRM_DEBUG_DRIVER("No FB bound\n");
6573 afb = to_amdgpu_framebuffer(new_state->fb);
6574 obj = new_state->fb->obj[0];
6575 rbo = gem_to_amdgpu_bo(obj);
6576 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6577 INIT_LIST_HEAD(&list);
6581 list_add(&tv.head, &list);
6583 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6585		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6589 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6590 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6592 domain = AMDGPU_GEM_DOMAIN_VRAM;
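	/*
	 * Cursor buffers are always pinned to VRAM; other plane types may
	 * also be allowed to scan out of GTT where
	 * amdgpu_display_supported_domains() reports it as supported.
	 */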
6594 r = amdgpu_bo_pin(rbo, domain);
6595 if (unlikely(r != 0)) {
6596 if (r != -ERESTARTSYS)
6597 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6598 ttm_eu_backoff_reservation(&ticket, &list);
6602 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6603 if (unlikely(r != 0)) {
6604 amdgpu_bo_unpin(rbo);
6605 ttm_eu_backoff_reservation(&ticket, &list);
6606 DRM_ERROR("%p bind failed\n", rbo);
6610 ttm_eu_backoff_reservation(&ticket, &list);
6612 afb->address = amdgpu_bo_gpu_offset(rbo);
6617 * We don't do surface updates on planes that have been newly created,
6618 * but we also don't have the afb->address during atomic check.
6620 * Fill in buffer attributes depending on the address here, but only on
6621 * newly created planes since they're not being used by DC yet and this
6622 * won't modify global state.
6624 dm_plane_state_old = to_dm_plane_state(plane->state);
6625 dm_plane_state_new = to_dm_plane_state(new_state);
6627 if (dm_plane_state_new->dc_state &&
6628 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6629 struct dc_plane_state *plane_state =
6630 dm_plane_state_new->dc_state;
6631 bool force_disable_dcc = !plane_state->dcc.enable;
6633 fill_plane_buffer_attributes(
6634 adev, afb, plane_state->format, plane_state->rotation,
6636 &plane_state->tiling_info, &plane_state->plane_size,
6637 &plane_state->dcc, &plane_state->address,
6638 afb->tmz_surface, force_disable_dcc);
6644 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6645 struct drm_plane_state *old_state)
6647 struct amdgpu_bo *rbo;
6653 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6654 r = amdgpu_bo_reserve(rbo, false);
6656 DRM_ERROR("failed to reserve rbo before unpin\n");
6660 amdgpu_bo_unpin(rbo);
6661 amdgpu_bo_unreserve(rbo);
6662 amdgpu_bo_unref(&rbo);
6665 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6666 struct drm_crtc_state *new_crtc_state)
6668 struct drm_framebuffer *fb = state->fb;
6669 int min_downscale, max_upscale;
6671 int max_scale = INT_MAX;
6673 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6674 if (fb && state->crtc) {
6675 /* Validate viewport to cover the case when only the position changes */
6676 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6677 int viewport_width = state->crtc_w;
6678 int viewport_height = state->crtc_h;
6680 if (state->crtc_x < 0)
6681 viewport_width += state->crtc_x;
6682 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6683 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6685 if (state->crtc_y < 0)
6686 viewport_height += state->crtc_y;
6687 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6688 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
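			/*
			 * At this point a partially off-screen plane has been
			 * clamped to its visible portion: e.g. crtc_x = -100
			 * with crtc_w = 300 leaves a 200-pixel-wide viewport,
			 * while a fully off-screen plane goes negative.
			 */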
6690 if (viewport_width < 0 || viewport_height < 0) {
6691 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6693		} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 on width because of pipe-split */
6694 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6696 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6697 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6703 /* Get min/max allowed scaling factors from plane caps. */
6704 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6705 &min_downscale, &max_upscale);
6707 * Convert to drm convention: 16.16 fixed point, instead of dc's
6708 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6709 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6711 min_scale = (1000 << 16) / max_upscale;
6712 max_scale = (1000 << 16) / min_downscale;
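		/*
		 * Worked example with assumed caps: a 16x max upscale is
		 * 16000 in dc units, giving min_scale = (1000 << 16) / 16000
		 * = 4096, i.e. 1/16 in 16.16 fixed point; a 4x max downscale
		 * (min_downscale = 250) gives max_scale = 262144, i.e. 4.0.
		 */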
6715 return drm_atomic_helper_check_plane_state(
6716 state, new_crtc_state, min_scale, max_scale, true, true);
6719 static int dm_plane_atomic_check(struct drm_plane *plane,
6720 struct drm_plane_state *state)
6722 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6723 struct dc *dc = adev->dm.dc;
6724 struct dm_plane_state *dm_plane_state;
6725 struct dc_scaling_info scaling_info;
6726 struct drm_crtc_state *new_crtc_state;
6729 trace_amdgpu_dm_plane_atomic_check(state);
6731 dm_plane_state = to_dm_plane_state(state);
6733 if (!dm_plane_state->dc_state)
6737 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6738 if (!new_crtc_state)
6741 ret = dm_plane_helper_check_state(state, new_crtc_state);
6745 ret = fill_dc_scaling_info(state, &scaling_info);
6749 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6755 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6756 struct drm_plane_state *new_plane_state)
6758 /* Only support async updates on cursor planes. */
6759 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6765 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6766 struct drm_plane_state *new_state)
6768 struct drm_plane_state *old_state =
6769 drm_atomic_get_old_plane_state(new_state->state, plane);
6771 trace_amdgpu_dm_atomic_update_cursor(new_state);
6773 swap(plane->state->fb, new_state->fb);
6775 plane->state->src_x = new_state->src_x;
6776 plane->state->src_y = new_state->src_y;
6777 plane->state->src_w = new_state->src_w;
6778 plane->state->src_h = new_state->src_h;
6779 plane->state->crtc_x = new_state->crtc_x;
6780 plane->state->crtc_y = new_state->crtc_y;
6781 plane->state->crtc_w = new_state->crtc_w;
6782 plane->state->crtc_h = new_state->crtc_h;
6784 handle_cursor_update(plane, old_state);
6787 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6788 .prepare_fb = dm_plane_helper_prepare_fb,
6789 .cleanup_fb = dm_plane_helper_cleanup_fb,
6790 .atomic_check = dm_plane_atomic_check,
6791 .atomic_async_check = dm_plane_atomic_async_check,
6792 .atomic_async_update = dm_plane_atomic_async_update
6796 * TODO: these are currently initialized to rgb formats only.
6797 * For future use cases we should either initialize them dynamically based on
6798 * plane capabilities, or initialize this array to all formats, so the internal
6799 * drm check will succeed, and let DC implement the proper check
6801 static const uint32_t rgb_formats[] = {
6802 DRM_FORMAT_XRGB8888,
6803 DRM_FORMAT_ARGB8888,
6804 DRM_FORMAT_RGBA8888,
6805 DRM_FORMAT_XRGB2101010,
6806 DRM_FORMAT_XBGR2101010,
6807 DRM_FORMAT_ARGB2101010,
6808 DRM_FORMAT_ABGR2101010,
6809 DRM_FORMAT_XBGR8888,
6810 DRM_FORMAT_ABGR8888,
6814 static const uint32_t overlay_formats[] = {
6815 DRM_FORMAT_XRGB8888,
6816 DRM_FORMAT_ARGB8888,
6817 DRM_FORMAT_RGBA8888,
6818 DRM_FORMAT_XBGR8888,
6819 DRM_FORMAT_ABGR8888,
6823 static const u32 cursor_formats[] = {
6827 static int get_plane_formats(const struct drm_plane *plane,
6828 const struct dc_plane_cap *plane_cap,
6829 uint32_t *formats, int max_formats)
6831 int i, num_formats = 0;
6834 * TODO: Query support for each group of formats directly from
6835 * DC plane caps. This will require adding more formats to the
6839 switch (plane->type) {
6840 case DRM_PLANE_TYPE_PRIMARY:
6841 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6842 if (num_formats >= max_formats)
6845 formats[num_formats++] = rgb_formats[i];
6848 if (plane_cap && plane_cap->pixel_format_support.nv12)
6849 formats[num_formats++] = DRM_FORMAT_NV12;
6850 if (plane_cap && plane_cap->pixel_format_support.p010)
6851 formats[num_formats++] = DRM_FORMAT_P010;
6852 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6853 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6854 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6855 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6856 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6860 case DRM_PLANE_TYPE_OVERLAY:
6861 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6862 if (num_formats >= max_formats)
6865 formats[num_formats++] = overlay_formats[i];
6869 case DRM_PLANE_TYPE_CURSOR:
6870 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6871 if (num_formats >= max_formats)
6874 formats[num_formats++] = cursor_formats[i];
6882 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6883 struct drm_plane *plane,
6884 unsigned long possible_crtcs,
6885 const struct dc_plane_cap *plane_cap)
6887 uint32_t formats[32];
6890 unsigned int supported_rotations;
6891 uint64_t *modifiers = NULL;
6893 num_formats = get_plane_formats(plane, plane_cap, formats,
6894 ARRAY_SIZE(formats));
6896 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6900 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6901 &dm_plane_funcs, formats, num_formats,
6902 modifiers, plane->type, NULL);
6907 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6908 plane_cap && plane_cap->per_pixel_alpha) {
6909 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6910 BIT(DRM_MODE_BLEND_PREMULTI);
6912 drm_plane_create_alpha_property(plane);
6913 drm_plane_create_blend_mode_property(plane, blend_caps);
6916 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6918 (plane_cap->pixel_format_support.nv12 ||
6919 plane_cap->pixel_format_support.p010)) {
6920 /* This only affects YUV formats. */
6921 drm_plane_create_color_properties(
6923 BIT(DRM_COLOR_YCBCR_BT601) |
6924 BIT(DRM_COLOR_YCBCR_BT709) |
6925 BIT(DRM_COLOR_YCBCR_BT2020),
6926 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6927 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6928 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6931 supported_rotations =
6932 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6933 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6935 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6936 plane->type != DRM_PLANE_TYPE_CURSOR)
6937 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6938 supported_rotations);
6940 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6942 /* Create (reset) the plane state */
6943 if (plane->funcs->reset)
6944 plane->funcs->reset(plane);
6949 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6950 struct drm_plane *plane,
6951 uint32_t crtc_index)
6953 struct amdgpu_crtc *acrtc = NULL;
6954 struct drm_plane *cursor_plane;
6958 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6962 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6963 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6965 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6969 res = drm_crtc_init_with_planes(
6974 &amdgpu_dm_crtc_funcs, NULL);
6979 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6981 /* Create (reset) the plane state */
6982 if (acrtc->base.funcs->reset)
6983 acrtc->base.funcs->reset(&acrtc->base);
6985 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6986 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6988 acrtc->crtc_id = crtc_index;
6989 acrtc->base.enabled = false;
6990 acrtc->otg_inst = -1;
6992 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6993 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6994 true, MAX_COLOR_LUT_ENTRIES);
6995 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7001 kfree(cursor_plane);
7006 static int to_drm_connector_type(enum signal_type st)
7009 case SIGNAL_TYPE_HDMI_TYPE_A:
7010 return DRM_MODE_CONNECTOR_HDMIA;
7011 case SIGNAL_TYPE_EDP:
7012 return DRM_MODE_CONNECTOR_eDP;
7013 case SIGNAL_TYPE_LVDS:
7014 return DRM_MODE_CONNECTOR_LVDS;
7015 case SIGNAL_TYPE_RGB:
7016 return DRM_MODE_CONNECTOR_VGA;
7017 case SIGNAL_TYPE_DISPLAY_PORT:
7018 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7019 return DRM_MODE_CONNECTOR_DisplayPort;
7020 case SIGNAL_TYPE_DVI_DUAL_LINK:
7021 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7022 return DRM_MODE_CONNECTOR_DVID;
7023 case SIGNAL_TYPE_VIRTUAL:
7024 return DRM_MODE_CONNECTOR_VIRTUAL;
7027 return DRM_MODE_CONNECTOR_Unknown;
7031 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7033 struct drm_encoder *encoder;
7035 /* There is only one encoder per connector */
7036 drm_connector_for_each_possible_encoder(connector, encoder)
7042 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7044 struct drm_encoder *encoder;
7045 struct amdgpu_encoder *amdgpu_encoder;
7047 encoder = amdgpu_dm_connector_to_encoder(connector);
7049 if (encoder == NULL)
7052 amdgpu_encoder = to_amdgpu_encoder(encoder);
7054 amdgpu_encoder->native_mode.clock = 0;
7056 if (!list_empty(&connector->probed_modes)) {
7057 struct drm_display_mode *preferred_mode = NULL;
7059 list_for_each_entry(preferred_mode,
7060 &connector->probed_modes,
7062 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7063 amdgpu_encoder->native_mode = *preferred_mode;
7071 static struct drm_display_mode *
7072 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7074 int hdisplay, int vdisplay)
7076 struct drm_device *dev = encoder->dev;
7077 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7078 struct drm_display_mode *mode = NULL;
7079 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7081 mode = drm_mode_duplicate(dev, native_mode);
7086 mode->hdisplay = hdisplay;
7087 mode->vdisplay = vdisplay;
7088 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7089 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7095 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7096 struct drm_connector *connector)
7098 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7099 struct drm_display_mode *mode = NULL;
7100 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7101 struct amdgpu_dm_connector *amdgpu_dm_connector =
7102 to_amdgpu_dm_connector(connector);
7106 char name[DRM_DISPLAY_MODE_LEN];
7109 } common_modes[] = {
7110 { "640x480", 640, 480},
7111 { "800x600", 800, 600},
7112 { "1024x768", 1024, 768},
7113 { "1280x720", 1280, 720},
7114 { "1280x800", 1280, 800},
7115 {"1280x1024", 1280, 1024},
7116 { "1440x900", 1440, 900},
7117 {"1680x1050", 1680, 1050},
7118 {"1600x1200", 1600, 1200},
7119 {"1920x1080", 1920, 1080},
7120 {"1920x1200", 1920, 1200}
7123 n = ARRAY_SIZE(common_modes);
7125 for (i = 0; i < n; i++) {
7126 struct drm_display_mode *curmode = NULL;
7127 bool mode_existed = false;
7129 if (common_modes[i].w > native_mode->hdisplay ||
7130 common_modes[i].h > native_mode->vdisplay ||
7131 (common_modes[i].w == native_mode->hdisplay &&
7132 common_modes[i].h == native_mode->vdisplay))
7135 list_for_each_entry(curmode, &connector->probed_modes, head) {
7136 if (common_modes[i].w == curmode->hdisplay &&
7137 common_modes[i].h == curmode->vdisplay) {
7138 mode_existed = true;
7146 mode = amdgpu_dm_create_common_mode(encoder,
7147 common_modes[i].name, common_modes[i].w,
7149 drm_mode_probed_add(connector, mode);
7150 amdgpu_dm_connector->num_modes++;
7154 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7157 struct amdgpu_dm_connector *amdgpu_dm_connector =
7158 to_amdgpu_dm_connector(connector);
7161 /* empty probed_modes */
7162 INIT_LIST_HEAD(&connector->probed_modes);
7163 amdgpu_dm_connector->num_modes =
7164 drm_add_edid_modes(connector, edid);
7166	/* Sort the probed modes before calling
7167	 * amdgpu_dm_get_native_mode(), since an EDID can have
7168	 * more than one preferred mode. Modes that appear
7169	 * later in the probed-mode list could be of a higher,
7170	 * preferred resolution. For example, a 3840x2160
7171	 * preferred timing in the base EDID and a 4096x2160
7172	 * preferred resolution in a later DID extension block.
7174 drm_mode_sort(&connector->probed_modes);
7175 amdgpu_dm_get_native_mode(connector);
7177 /* Freesync capabilities are reset by calling
7178	 * drm_add_edid_modes() and need to be
	 * restored here.
7181 amdgpu_dm_update_freesync_caps(connector, edid);
7183 amdgpu_dm_connector->num_modes = 0;
7187 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7188 struct drm_display_mode *mode)
7190 struct drm_display_mode *m;
7192 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7193 if (drm_mode_equal(m, mode))
7200 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7202 const struct drm_display_mode *m;
7203 struct drm_display_mode *new_mode;
7205 uint32_t new_modes_count = 0;
7207 /* Standard FPS values
7216 * 60 - Commonly used
7217	 * 48, 72, 96 - Multiples of 24
7219 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7220 48000, 50000, 60000, 72000, 96000 };
7223 * Find mode with highest refresh rate with the same resolution
7224 * as the preferred mode. Some monitors report a preferred mode
7225 * with lower resolution than the highest refresh rate supported.
7228 m = get_highest_refresh_rate_mode(aconnector, true);
7232 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7233 uint64_t target_vtotal, target_vtotal_diff;
7236 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7239 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7240 common_rates[i] > aconnector->max_vfreq * 1000)
7243 num = (unsigned long long)m->clock * 1000 * 1000;
7244 den = common_rates[i] * (unsigned long long)m->htotal;
7245 target_vtotal = div_u64(num, den);
7246 target_vtotal_diff = target_vtotal - m->vtotal;
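		/*
		 * Example with assumed numbers: a 148500 kHz, htotal 2200,
		 * vtotal 1125 mode (1080p60) retargeted to 48 Hz gives
		 * target_vtotal = 148500000000 / (48000 * 2200) ~= 1406,
		 * i.e. a diff of ~281 lines of extra vertical blanking.
		 */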
7248 /* Check for illegal modes */
7249 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7250 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7251 m->vtotal + target_vtotal_diff < m->vsync_end)
7254 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7258 new_mode->vtotal += (u16)target_vtotal_diff;
7259 new_mode->vsync_start += (u16)target_vtotal_diff;
7260 new_mode->vsync_end += (u16)target_vtotal_diff;
7261 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7262 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7264 if (!is_duplicate_mode(aconnector, new_mode)) {
7265 drm_mode_probed_add(&aconnector->base, new_mode);
7266 new_modes_count += 1;
7268 drm_mode_destroy(aconnector->base.dev, new_mode);
7271 return new_modes_count;
7274 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7277 struct amdgpu_dm_connector *amdgpu_dm_connector =
7278 to_amdgpu_dm_connector(connector);
7280 if (!(amdgpu_freesync_vid_mode && edid))
7283 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7284 amdgpu_dm_connector->num_modes +=
7285 add_fs_modes(amdgpu_dm_connector);
7288 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7290 struct amdgpu_dm_connector *amdgpu_dm_connector =
7291 to_amdgpu_dm_connector(connector);
7292 struct drm_encoder *encoder;
7293 struct edid *edid = amdgpu_dm_connector->edid;
7295 encoder = amdgpu_dm_connector_to_encoder(connector);
7297 if (!drm_edid_is_valid(edid)) {
7298 amdgpu_dm_connector->num_modes =
7299 drm_add_modes_noedid(connector, 640, 480);
7301 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7302 amdgpu_dm_connector_add_common_modes(encoder, connector);
7303 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7305 amdgpu_dm_fbc_init(connector);
7307 return amdgpu_dm_connector->num_modes;
7310 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7311 struct amdgpu_dm_connector *aconnector,
7313 struct dc_link *link,
7316 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7319 * Some of the properties below require access to state, like bpc.
7320 * Allocate some default initial connector state with our reset helper.
7322 if (aconnector->base.funcs->reset)
7323 aconnector->base.funcs->reset(&aconnector->base);
7325 aconnector->connector_id = link_index;
7326 aconnector->dc_link = link;
7327 aconnector->base.interlace_allowed = false;
7328 aconnector->base.doublescan_allowed = false;
7329 aconnector->base.stereo_allowed = false;
7330 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7331 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7332 aconnector->audio_inst = -1;
7333 mutex_init(&aconnector->hpd_lock);
7336	 * Configure HPD hot-plug support: connector->polled defaults to 0,
7337	 * which means HPD hot plug is not supported
7339 switch (connector_type) {
7340 case DRM_MODE_CONNECTOR_HDMIA:
7341 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7342 aconnector->base.ycbcr_420_allowed =
7343 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7345 case DRM_MODE_CONNECTOR_DisplayPort:
7346 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7347 aconnector->base.ycbcr_420_allowed =
7348 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7350 case DRM_MODE_CONNECTOR_DVID:
7351 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7357 drm_object_attach_property(&aconnector->base.base,
7358 dm->ddev->mode_config.scaling_mode_property,
7359 DRM_MODE_SCALE_NONE);
7361 drm_object_attach_property(&aconnector->base.base,
7362 adev->mode_info.underscan_property,
7364 drm_object_attach_property(&aconnector->base.base,
7365 adev->mode_info.underscan_hborder_property,
7367 drm_object_attach_property(&aconnector->base.base,
7368 adev->mode_info.underscan_vborder_property,
7371 if (!aconnector->mst_port)
7372 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7374 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7375 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7376 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7378 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7379 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7380 drm_object_attach_property(&aconnector->base.base,
7381 adev->mode_info.abm_level_property, 0);
7384 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7385 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7386 connector_type == DRM_MODE_CONNECTOR_eDP) {
7387 drm_object_attach_property(
7388 &aconnector->base.base,
7389 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7391 if (!aconnector->mst_port)
7392 drm_connector_attach_vrr_capable_property(&aconnector->base);
7394 #ifdef CONFIG_DRM_AMD_DC_HDCP
7395 if (adev->dm.hdcp_workqueue)
7396 drm_connector_attach_content_protection_property(&aconnector->base, true);
7401 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7402 struct i2c_msg *msgs, int num)
7404 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7405 struct ddc_service *ddc_service = i2c->ddc_service;
7406 struct i2c_command cmd;
7410 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7415 cmd.number_of_payloads = num;
7416 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7419 for (i = 0; i < num; i++) {
7420 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7421 cmd.payloads[i].address = msgs[i].addr;
7422 cmd.payloads[i].length = msgs[i].len;
7423 cmd.payloads[i].data = msgs[i].buf;
7427 ddc_service->ctx->dc,
7428 ddc_service->ddc_pin->hw_info.ddc_channel,
7432 kfree(cmd.payloads);
7436 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7438 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7441 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7442 .master_xfer = amdgpu_dm_i2c_xfer,
7443 .functionality = amdgpu_dm_i2c_func,
7446 static struct amdgpu_i2c_adapter *
7447 create_i2c(struct ddc_service *ddc_service,
7451 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7452 struct amdgpu_i2c_adapter *i2c;
7454 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7457 i2c->base.owner = THIS_MODULE;
7458 i2c->base.class = I2C_CLASS_DDC;
7459 i2c->base.dev.parent = &adev->pdev->dev;
7460 i2c->base.algo = &amdgpu_dm_i2c_algo;
7461 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7462 i2c_set_adapdata(&i2c->base, i2c);
7463 i2c->ddc_service = ddc_service;
7464 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7471 * Note: this function assumes that dc_link_detect() was called for the
7472 * dc_link which will be represented by this aconnector.
7474 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7475 struct amdgpu_dm_connector *aconnector,
7476 uint32_t link_index,
7477 struct amdgpu_encoder *aencoder)
7481 struct dc *dc = dm->dc;
7482 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7483 struct amdgpu_i2c_adapter *i2c;
7485 link->priv = aconnector;
7487 DRM_DEBUG_DRIVER("%s()\n", __func__);
7489 i2c = create_i2c(link->ddc, link->link_index, &res);
7491 DRM_ERROR("Failed to create i2c adapter data\n");
7495 aconnector->i2c = i2c;
7496 res = i2c_add_adapter(&i2c->base);
7499 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7503 connector_type = to_drm_connector_type(link->connector_signal);
7505 res = drm_connector_init_with_ddc(
7508 &amdgpu_dm_connector_funcs,
7513 DRM_ERROR("connector_init failed\n");
7514 aconnector->connector_id = -1;
7518 drm_connector_helper_add(
7520 &amdgpu_dm_connector_helper_funcs);
7522 amdgpu_dm_connector_init_helper(
7529 drm_connector_attach_encoder(
7530 &aconnector->base, &aencoder->base);
7532 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7533 || connector_type == DRM_MODE_CONNECTOR_eDP)
7534 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7539 aconnector->i2c = NULL;
7544 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7546 switch (adev->mode_info.num_crtc) {
7563 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7564 struct amdgpu_encoder *aencoder,
7565 uint32_t link_index)
7567 struct amdgpu_device *adev = drm_to_adev(dev);
7569 int res = drm_encoder_init(dev,
7571 &amdgpu_dm_encoder_funcs,
7572 DRM_MODE_ENCODER_TMDS,
7575 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7578 aencoder->encoder_id = link_index;
7580 aencoder->encoder_id = -1;
7582 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7587 static void manage_dm_interrupts(struct amdgpu_device *adev,
7588 struct amdgpu_crtc *acrtc,
7592 * We have no guarantee that the frontend index maps to the same
7593 * backend index - some even map to more than one.
7595 * TODO: Use a different interrupt or check DC itself for the mapping.
7598 amdgpu_display_crtc_idx_to_irq_type(
7603 drm_crtc_vblank_on(&acrtc->base);
7606 &adev->pageflip_irq,
7608 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7615 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7623 &adev->pageflip_irq,
7625 drm_crtc_vblank_off(&acrtc->base);
7629 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7630 struct amdgpu_crtc *acrtc)
7633 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7636	 * This reads the current state for the IRQ and forcibly reapplies
7637	 * the setting to hardware.
7639 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7643 is_scaling_state_different(const struct dm_connector_state *dm_state,
7644 const struct dm_connector_state *old_dm_state)
7646 if (dm_state->scaling != old_dm_state->scaling)
7648 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7649 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7651 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7652 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7654 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7655 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7660 #ifdef CONFIG_DRM_AMD_DC_HDCP
7661 static bool is_content_protection_different(struct drm_connector_state *state,
7662 const struct drm_connector_state *old_state,
7663 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7665 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7666 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7668 /* Handle: Type0/1 change */
7669 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7670 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7671 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7675	/* CP is being re-enabled, ignore this
7677 * Handles: ENABLED -> DESIRED
7679 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7680 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7681 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7685 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7687 * Handles: UNDESIRED -> ENABLED
7689 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7690 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7691 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7693	/* Check if something is connected or enabled; otherwise we would start HDCP with nothing connected or enabled:
7694	 * hot-plug, headless S3, DPMS
7696 * Handles: DESIRED -> DESIRED (Special case)
7698 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7699 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7700 dm_con_state->update_hdcp = false;
7705 * Handles: UNDESIRED -> UNDESIRED
7706 * DESIRED -> DESIRED
7707 * ENABLED -> ENABLED
7709 if (old_state->content_protection == state->content_protection)
7713 * Handles: UNDESIRED -> DESIRED
7714 * DESIRED -> UNDESIRED
7715 * ENABLED -> UNDESIRED
7717 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7721 * Handles: DESIRED -> ENABLED
7727 static void remove_stream(struct amdgpu_device *adev,
7728 struct amdgpu_crtc *acrtc,
7729 struct dc_stream_state *stream)
7731 /* this is the update mode case */
7733 acrtc->otg_inst = -1;
7734 acrtc->enabled = false;
7737 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7738 struct dc_cursor_position *position)
7740 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7742 int xorigin = 0, yorigin = 0;
7744 if (!crtc || !plane->state->fb)
7747 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7748 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7749 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7751 plane->state->crtc_w,
7752 plane->state->crtc_h);
7756 x = plane->state->crtc_x;
7757 y = plane->state->crtc_y;
7759 if (x <= -amdgpu_crtc->max_cursor_width ||
7760 y <= -amdgpu_crtc->max_cursor_height)
7764 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7768 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
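	/*
	 * For a cursor hanging off the top/left edge the hotspot absorbs
	 * the negative offset: e.g. crtc_x = -20 yields xorigin = 20 and
	 * the on-screen position is clamped to 0.
	 */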
7771 position->enable = true;
7772 position->translate_by_source = true;
7775 position->x_hotspot = xorigin;
7776 position->y_hotspot = yorigin;
7781 static void handle_cursor_update(struct drm_plane *plane,
7782 struct drm_plane_state *old_plane_state)
7784 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7785 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7786 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7787 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7788 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7789 uint64_t address = afb ? afb->address : 0;
7790 struct dc_cursor_position position = {0};
7791 struct dc_cursor_attributes attributes;
7794 if (!plane->state->fb && !old_plane_state->fb)
7797	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
7799 amdgpu_crtc->crtc_id,
7800 plane->state->crtc_w,
7801 plane->state->crtc_h);
7803 ret = get_cursor_position(plane, crtc, &position);
7807 if (!position.enable) {
7808 /* turn off cursor */
7809 if (crtc_state && crtc_state->stream) {
7810 mutex_lock(&adev->dm.dc_lock);
7811 dc_stream_set_cursor_position(crtc_state->stream,
7813 mutex_unlock(&adev->dm.dc_lock);
7818 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7819 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7821 memset(&attributes, 0, sizeof(attributes));
7822 attributes.address.high_part = upper_32_bits(address);
7823 attributes.address.low_part = lower_32_bits(address);
7824 attributes.width = plane->state->crtc_w;
7825 attributes.height = plane->state->crtc_h;
7826 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7827 attributes.rotation_angle = 0;
7828 attributes.attribute_flags.value = 0;
7830 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7832 if (crtc_state->stream) {
7833 mutex_lock(&adev->dm.dc_lock);
7834 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7836 DRM_ERROR("DC failed to set cursor attributes\n");
7838 if (!dc_stream_set_cursor_position(crtc_state->stream,
7840 DRM_ERROR("DC failed to set cursor position\n");
7841 mutex_unlock(&adev->dm.dc_lock);
7845 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7848 assert_spin_locked(&acrtc->base.dev->event_lock);
7849 WARN_ON(acrtc->event);
7851 acrtc->event = acrtc->base.state->event;
7853 /* Set the flip status */
7854 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7856 /* Mark this event as consumed */
7857 acrtc->base.state->event = NULL;
7859 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7863 static void update_freesync_state_on_stream(
7864 struct amdgpu_display_manager *dm,
7865 struct dm_crtc_state *new_crtc_state,
7866 struct dc_stream_state *new_stream,
7867 struct dc_plane_state *surface,
7868 u32 flip_timestamp_in_us)
7870 struct mod_vrr_params vrr_params;
7871 struct dc_info_packet vrr_infopacket = {0};
7872 struct amdgpu_device *adev = dm->adev;
7873 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7874 unsigned long flags;
7875 bool pack_sdp_v1_3 = false;
7881 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7882 * For now it's sufficient to just guard against these conditions.
7885 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7888 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7889 vrr_params = acrtc->dm_irq_params.vrr_params;
7892 mod_freesync_handle_preflip(
7893 dm->freesync_module,
7896 flip_timestamp_in_us,
7899 if (adev->family < AMDGPU_FAMILY_AI &&
7900 amdgpu_dm_vrr_active(new_crtc_state)) {
7901 mod_freesync_handle_v_update(dm->freesync_module,
7902 new_stream, &vrr_params);
7904 /* Need to call this before the frame ends. */
7905 dc_stream_adjust_vmin_vmax(dm->dc,
7906 new_crtc_state->stream,
7907 &vrr_params.adjust);
7911 mod_freesync_build_vrr_infopacket(
7912 dm->freesync_module,
7916 TRANSFER_FUNC_UNKNOWN,
7920 new_crtc_state->freesync_timing_changed |=
7921 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7923 sizeof(vrr_params.adjust)) != 0);
7925 new_crtc_state->freesync_vrr_info_changed |=
7926 (memcmp(&new_crtc_state->vrr_infopacket,
7928 sizeof(vrr_infopacket)) != 0);
7930 acrtc->dm_irq_params.vrr_params = vrr_params;
7931 new_crtc_state->vrr_infopacket = vrr_infopacket;
7933 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7934 new_stream->vrr_infopacket = vrr_infopacket;
7936 if (new_crtc_state->freesync_vrr_info_changed)
7937 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7938 new_crtc_state->base.crtc->base.id,
7939 (int)new_crtc_state->base.vrr_enabled,
7940 (int)vrr_params.state);
7942 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7945 static void update_stream_irq_parameters(
7946 struct amdgpu_display_manager *dm,
7947 struct dm_crtc_state *new_crtc_state)
7949 struct dc_stream_state *new_stream = new_crtc_state->stream;
7950 struct mod_vrr_params vrr_params;
7951 struct mod_freesync_config config = new_crtc_state->freesync_config;
7952 struct amdgpu_device *adev = dm->adev;
7953 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7954 unsigned long flags;
7960 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7961 * For now it's sufficient to just guard against these conditions.
7963 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7966 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7967 vrr_params = acrtc->dm_irq_params.vrr_params;
7969 if (new_crtc_state->vrr_supported &&
7970 config.min_refresh_in_uhz &&
7971 config.max_refresh_in_uhz) {
7973 * if freesync compatible mode was set, config.state will be set
7976 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7977 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7978 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7979 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7980 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7981 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7982 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7984 config.state = new_crtc_state->base.vrr_enabled ?
7985 VRR_STATE_ACTIVE_VARIABLE :
7989 config.state = VRR_STATE_UNSUPPORTED;
7992 mod_freesync_build_vrr_params(dm->freesync_module,
7994 &config, &vrr_params);
7996 new_crtc_state->freesync_timing_changed |=
7997 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7998 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8000 new_crtc_state->freesync_config = config;
8001 /* Copy state for access from DM IRQ handler */
8002 acrtc->dm_irq_params.freesync_config = config;
8003 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8004 acrtc->dm_irq_params.vrr_params = vrr_params;
8005 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8008 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8009 struct dm_crtc_state *new_state)
8011 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8012 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8014 if (!old_vrr_active && new_vrr_active) {
8015 /* Transition VRR inactive -> active:
8016 * While VRR is active, we must not disable vblank irq, as a
8017 * reenable after disable would compute bogus vblank/pflip
8018		 * timestamps if the reenable happens inside the display front porch.
8020 * We also need vupdate irq for the actual core vblank handling
8023 dm_set_vupdate_irq(new_state->base.crtc, true);
8024 drm_crtc_vblank_get(new_state->base.crtc);
8025 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8026 __func__, new_state->base.crtc->base.id);
8027 } else if (old_vrr_active && !new_vrr_active) {
8028 /* Transition VRR active -> inactive:
8029 * Allow vblank irq disable again for fixed refresh rate.
8031 dm_set_vupdate_irq(new_state->base.crtc, false);
8032 drm_crtc_vblank_put(new_state->base.crtc);
8033 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8034 __func__, new_state->base.crtc->base.id);
8038 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8040 struct drm_plane *plane;
8041 struct drm_plane_state *old_plane_state, *new_plane_state;
8045 * TODO: Make this per-stream so we don't issue redundant updates for
8046 * commits with multiple streams.
8048 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8050 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8051 handle_cursor_update(plane, old_plane_state);
8054 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8055 struct dc_state *dc_state,
8056 struct drm_device *dev,
8057 struct amdgpu_display_manager *dm,
8058 struct drm_crtc *pcrtc,
8059 bool wait_for_vblank)
8062 uint64_t timestamp_ns;
8063 struct drm_plane *plane;
8064 struct drm_plane_state *old_plane_state, *new_plane_state;
8065 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8066 struct drm_crtc_state *new_pcrtc_state =
8067 drm_atomic_get_new_crtc_state(state, pcrtc);
8068 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8069 struct dm_crtc_state *dm_old_crtc_state =
8070 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8071 int planes_count = 0, vpos, hpos;
8073 unsigned long flags;
8074 struct amdgpu_bo *abo;
8075 uint32_t target_vblank, last_flip_vblank;
8076 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8077 bool pflip_present = false;
8079 struct dc_surface_update surface_updates[MAX_SURFACES];
8080 struct dc_plane_info plane_infos[MAX_SURFACES];
8081 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8082 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8083 struct dc_stream_update stream_update;
8086 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8089 dm_error("Failed to allocate update bundle\n");
8094 * Disable the cursor first if we're disabling all the planes.
8095	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
8098 if (acrtc_state->active_planes == 0)
8099 amdgpu_dm_commit_cursors(state);
8101 /* update planes when needed */
8102 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8103 struct drm_crtc *crtc = new_plane_state->crtc;
8104 struct drm_crtc_state *new_crtc_state;
8105 struct drm_framebuffer *fb = new_plane_state->fb;
8106 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8107 bool plane_needs_flip;
8108 struct dc_plane_state *dc_plane;
8109 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8111 /* Cursor plane is handled after stream updates */
8112 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8115 if (!fb || !crtc || pcrtc != crtc)
8118 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8119 if (!new_crtc_state->active)
8122 dc_plane = dm_new_plane_state->dc_state;
8124 bundle->surface_updates[planes_count].surface = dc_plane;
8125 if (new_pcrtc_state->color_mgmt_changed) {
8126 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8127 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8128 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8131 fill_dc_scaling_info(new_plane_state,
8132 &bundle->scaling_infos[planes_count]);
8134 bundle->surface_updates[planes_count].scaling_info =
8135 &bundle->scaling_infos[planes_count];
8137 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8139 pflip_present = pflip_present || plane_needs_flip;
8141 if (!plane_needs_flip) {
8146 abo = gem_to_amdgpu_bo(fb->obj[0]);
8149 * Wait for all fences on this FB. Do limited wait to avoid
8150 * deadlock during GPU reset when this fence will not signal
8151 * but we hold reservation lock for the BO.
8153 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8155 msecs_to_jiffies(5000));
8156 if (unlikely(r <= 0))
8157			DRM_ERROR("Waiting for fences timed out!\n");
8159 fill_dc_plane_info_and_addr(
8160 dm->adev, new_plane_state,
8162 &bundle->plane_infos[planes_count],
8163 &bundle->flip_addrs[planes_count].address,
8164 afb->tmz_surface, false);
8166 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
8167 new_plane_state->plane->index,
8168 bundle->plane_infos[planes_count].dcc.enable);
8170 bundle->surface_updates[planes_count].plane_info =
8171 &bundle->plane_infos[planes_count];
8174 * Only allow immediate flips for fast updates that don't
8175		 * change FB pitch, DCC state, rotation or mirroring.
8177 bundle->flip_addrs[planes_count].flip_immediate =
8178 crtc->state->async_flip &&
8179 acrtc_state->update_type == UPDATE_TYPE_FAST;
8181 timestamp_ns = ktime_get_ns();
8182 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
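		/*
		 * ktime is in ns; mod_freesync's preflip handling takes the
		 * flip timestamp in us, hence the div_u64 by 1000.
		 */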
8183 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8184 bundle->surface_updates[planes_count].surface = dc_plane;
8186 if (!bundle->surface_updates[planes_count].surface) {
8187 DRM_ERROR("No surface for CRTC: id=%d\n",
8188 acrtc_attach->crtc_id);
8192 if (plane == pcrtc->primary)
8193 update_freesync_state_on_stream(
8196 acrtc_state->stream,
8198 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8200 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
8202 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8203 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8209 if (pflip_present) {
8211 /* Use old throttling in non-vrr fixed refresh rate mode
8212 * to keep flip scheduling based on target vblank counts
8213 * working in a backwards compatible way, e.g., for
8214 * clients using the GLX_OML_sync_control extension or
8215 * DRI3/Present extension with defined target_msc.
8217 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8220 /* For variable refresh rate mode only:
8221 * Get vblank of last completed flip to avoid > 1 vrr
8222 * flips per video frame by use of throttling, but allow
8223 * flip programming anywhere in the possibly large
8224 * variable vrr vblank interval for fine-grained flip
8225 * timing control and more opportunity to avoid stutter
8226 * on late submission of flips.
8228 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8229 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8230 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8233 target_vblank = last_flip_vblank + wait_for_vblank;
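		/*
		 * wait_for_vblank is 0 or 1, so this throttles to at most
		 * one flip per vblank interval measured against the last
		 * completed flip.
		 */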
8236 * Wait until we're out of the vertical blank period before the one
8237 * targeted by the flip
8239 while ((acrtc_attach->enabled &&
8240 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8241 0, &vpos, &hpos, NULL,
8242 NULL, &pcrtc->hwmode)
8243 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8244 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8245 (int)(target_vblank -
8246 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8247 usleep_range(1000, 1100);
8251 * Prepare the flip event for the pageflip interrupt to handle.
8253 * This only works in the case where we've already turned on the
8254 * appropriate hardware blocks (eg. HUBP) so in the transition case
8255 * from 0 -> n planes we have to skip a hardware generated event
8256 * and rely on sending it from software.
8258 if (acrtc_attach->base.state->event &&
8259 acrtc_state->active_planes > 0) {
8260 drm_crtc_vblank_get(pcrtc);
8262 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8264 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8265 prepare_flip_isr(acrtc_attach);
8267 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8270 if (acrtc_state->stream) {
8271 if (acrtc_state->freesync_vrr_info_changed)
8272 bundle->stream_update.vrr_infopacket =
8273 &acrtc_state->stream->vrr_infopacket;
8277 /* Update the planes if changed or disable if we don't have any. */
8278 if ((planes_count || acrtc_state->active_planes == 0) &&
8279 acrtc_state->stream) {
8280 bundle->stream_update.stream = acrtc_state->stream;
8281 if (new_pcrtc_state->mode_changed) {
8282 bundle->stream_update.src = acrtc_state->stream->src;
8283 bundle->stream_update.dst = acrtc_state->stream->dst;
8286 if (new_pcrtc_state->color_mgmt_changed) {
8288 * TODO: This isn't fully correct since we've actually
8289 * already modified the stream in place.
8291 bundle->stream_update.gamut_remap =
8292 &acrtc_state->stream->gamut_remap_matrix;
8293 bundle->stream_update.output_csc_transform =
8294 &acrtc_state->stream->csc_color_matrix;
8295 bundle->stream_update.out_transfer_func =
8296 acrtc_state->stream->out_transfer_func;
8299 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8300 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8301 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8304 * If FreeSync state on the stream has changed then we need to
8305 * re-adjust the min/max bounds now that DC doesn't handle this
8306 * as part of commit.
8308 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8309 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8310 dc_stream_adjust_vmin_vmax(
8311 dm->dc, acrtc_state->stream,
8312 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8313 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
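/* Rough illustration with hypothetical numbers: for a 48-144 Hz panel at a
 * 148.5 MHz pixel clock and htotal 2200, refresh = pix_clk / (htotal * vtotal),
 * so the vmin/vmax adjust bounds span vtotal ~469 (144 Hz) to ~1406 (48 Hz). */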
8315 mutex_lock(&dm->dc_lock);
8316 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8317 acrtc_state->stream->link->psr_settings.psr_allow_active)
8318 amdgpu_dm_psr_disable(acrtc_state->stream);
8320 dc_commit_updates_for_stream(dm->dc,
8321 bundle->surface_updates,
8323 acrtc_state->stream,
8324 &bundle->stream_update,
8328 * Enable or disable the interrupts on the backend.
8330 * Most pipes are put into power gating when unused.
8332 * When power gating is enabled on a pipe we lose the
8333 * interrupt enablement state when power gating is disabled.
8335 * So we need to update the IRQ control state in hardware
8336 * whenever the pipe turns on (since it could be previously
8337 * power gated) or off (since some pipes can't be power gated on some ASICs).
8340 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8341 dm_update_pflip_irq_state(drm_to_adev(dev),
8344 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8345 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8346 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8347 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8348 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8349 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8350 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8351 amdgpu_dm_psr_enable(acrtc_state->stream);
8354 mutex_unlock(&dm->dc_lock);
8358 * Update cursor state *after* programming all the planes.
8359 * This avoids redundant programming in the case where we're going
8360 * to be disabling a single plane - those pipes are being disabled.
8362 if (acrtc_state->active_planes)
8363 amdgpu_dm_commit_cursors(state);
8369 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8370 struct drm_atomic_state *state)
8372 struct amdgpu_device *adev = drm_to_adev(dev);
8373 struct amdgpu_dm_connector *aconnector;
8374 struct drm_connector *connector;
8375 struct drm_connector_state *old_con_state, *new_con_state;
8376 struct drm_crtc_state *new_crtc_state;
8377 struct dm_crtc_state *new_dm_crtc_state;
8378 const struct dc_stream_status *status;
8381 /* Notify device removals. */
8382 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8383 if (old_con_state->crtc != new_con_state->crtc) {
8384 /* CRTC changes require notification. */
8388 if (!new_con_state->crtc)
8391 new_crtc_state = drm_atomic_get_new_crtc_state(
8392 state, new_con_state->crtc);
8394 if (!new_crtc_state)
8397 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8401 aconnector = to_amdgpu_dm_connector(connector);
8403 mutex_lock(&adev->dm.audio_lock);
8404 inst = aconnector->audio_inst;
8405 aconnector->audio_inst = -1;
8406 mutex_unlock(&adev->dm.audio_lock);
8408 amdgpu_dm_audio_eld_notify(adev, inst);
8411 /* Notify audio device additions. */
8412 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8413 if (!new_con_state->crtc)
8416 new_crtc_state = drm_atomic_get_new_crtc_state(
8417 state, new_con_state->crtc);
8419 if (!new_crtc_state)
8422 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8425 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8426 if (!new_dm_crtc_state->stream)
8429 status = dc_stream_get_status(new_dm_crtc_state->stream);
8433 aconnector = to_amdgpu_dm_connector(connector);
8435 mutex_lock(&adev->dm.audio_lock);
8436 inst = status->audio_inst;
8437 aconnector->audio_inst = inst;
8438 mutex_unlock(&adev->dm.audio_lock);
8440 amdgpu_dm_audio_eld_notify(adev, inst);
8445 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8446 * @crtc_state: the DRM CRTC state
8447 * @stream_state: the DC stream state.
8449 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8450 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8452 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8453 struct dc_stream_state *stream_state)
8455 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8459 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8460 * @state: The atomic state to commit
8462 * This will tell DC to commit the constructed DC state from atomic_check,
8463 * programming the hardware. Any failures here implies a hardware failure, since
8464 * atomic check should have filtered anything non-kosher.
8466 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8468 struct drm_device *dev = state->dev;
8469 struct amdgpu_device *adev = drm_to_adev(dev);
8470 struct amdgpu_display_manager *dm = &adev->dm;
8471 struct dm_atomic_state *dm_state;
8472 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8474 struct drm_crtc *crtc;
8475 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8476 unsigned long flags;
8477 bool wait_for_vblank = true;
8478 struct drm_connector *connector;
8479 struct drm_connector_state *old_con_state, *new_con_state;
8480 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8481 int crtc_disable_count = 0;
8482 bool mode_set_reset_required = false;
8484 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8486 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8488 dm_state = dm_atomic_get_new_state(state);
8489 if (dm_state && dm_state->context) {
8490 dc_state = dm_state->context;
8492 /* No state changes, retain current state. */
8493 dc_state_temp = dc_create_state(dm->dc);
8494 ASSERT(dc_state_temp);
8495 dc_state = dc_state_temp;
8496 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8499 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8500 new_crtc_state, i) {
8501 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8503 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8505 if (old_crtc_state->active &&
8506 (!new_crtc_state->active ||
8507 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8508 manage_dm_interrupts(adev, acrtc, false);
8509 dc_stream_release(dm_old_crtc_state->stream);
8513 drm_atomic_helper_calc_timestamping_constants(state);
8515 /* update changed items */
8516 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8517 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8519 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8520 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8523 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8524 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8525 "connectors_changed:%d\n",
8527 new_crtc_state->enable,
8528 new_crtc_state->active,
8529 new_crtc_state->planes_changed,
8530 new_crtc_state->mode_changed,
8531 new_crtc_state->active_changed,
8532 new_crtc_state->connectors_changed);
8534 /* Disable cursor if disabling crtc */
8535 if (old_crtc_state->active && !new_crtc_state->active) {
8536 struct dc_cursor_position position;
8538 memset(&position, 0, sizeof(position));
8539 mutex_lock(&dm->dc_lock);
8540 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8541 mutex_unlock(&dm->dc_lock);
8544 /* Copy all transient state flags into dc state */
8545 if (dm_new_crtc_state->stream) {
8546 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8547 dm_new_crtc_state->stream);
8550 /* handles headless hotplug case, updating new_state and
8551 * aconnector as needed
8554 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8556 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8558 if (!dm_new_crtc_state->stream) {
8560 * This could happen because of issues with
8561 * userspace notification delivery.
8562 * In this case userspace tries to set a mode on
8563 * a display which is in fact disconnected.
8564 * dc_sink is NULL on the aconnector in this case.
8565 * We expect a mode-reset to come soon.
8567 * This can also happen when an unplug occurs
8568 * during the resume sequence.
8570 * In either case, we want to pretend we still
8571 * have a sink to keep the pipe running, so that
8572 * the hw state stays consistent with the sw state.
8574 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8575 __func__, acrtc->base.base.id);
8579 if (dm_old_crtc_state->stream)
8580 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8582 pm_runtime_get_noresume(dev->dev);
8584 acrtc->enabled = true;
8585 acrtc->hw_mode = new_crtc_state->mode;
8586 crtc->hwmode = new_crtc_state->mode;
8587 mode_set_reset_required = true;
8588 } else if (modereset_required(new_crtc_state)) {
8589 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8590 /* i.e. reset mode */
8591 if (dm_old_crtc_state->stream)
8592 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8594 mode_set_reset_required = true;
8596 } /* for_each_crtc_in_state() */
8599 /* If there is a mode set or reset, disable eDP PSR */
8600 if (mode_set_reset_required)
8601 amdgpu_dm_psr_disable_all(dm);
8603 dm_enable_per_frame_crtc_master_sync(dc_state);
8604 mutex_lock(&dm->dc_lock);
8605 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8606 mutex_unlock(&dm->dc_lock);
8609 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8610 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8612 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8614 if (dm_new_crtc_state->stream != NULL) {
8615 const struct dc_stream_status *status =
8616 dc_stream_get_status(dm_new_crtc_state->stream);
8619 status = dc_stream_get_status_from_state(dc_state,
8620 dm_new_crtc_state->stream);
8622 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
8624 acrtc->otg_inst = status->primary_otg_inst;
8627 #ifdef CONFIG_DRM_AMD_DC_HDCP
8628 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8629 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8630 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8631 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8633 new_crtc_state = NULL;
8636 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8638 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8640 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8641 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8642 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8643 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8644 dm_new_con_state->update_hdcp = true;
8648 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8649 hdcp_update_display(
8650 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8651 new_con_state->hdcp_content_type,
8652 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8656 /* Handle connector state changes */
8657 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8658 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8659 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8660 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8661 struct dc_surface_update dummy_updates[MAX_SURFACES];
8662 struct dc_stream_update stream_update;
8663 struct dc_info_packet hdr_packet;
8664 struct dc_stream_status *status = NULL;
8665 bool abm_changed, hdr_changed, scaling_changed;
8667 memset(&dummy_updates, 0, sizeof(dummy_updates));
8668 memset(&stream_update, 0, sizeof(stream_update));
8671 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8672 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8675 /* Skip any modesets/resets */
8676 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8679 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8680 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8682 scaling_changed = is_scaling_state_different(dm_new_con_state,
8685 abm_changed = dm_new_crtc_state->abm_level !=
8686 dm_old_crtc_state->abm_level;
8689 is_hdr_metadata_different(old_con_state, new_con_state);
8691 if (!scaling_changed && !abm_changed && !hdr_changed)
8694 stream_update.stream = dm_new_crtc_state->stream;
8695 if (scaling_changed) {
8696 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8697 dm_new_con_state, dm_new_crtc_state->stream);
8699 stream_update.src = dm_new_crtc_state->stream->src;
8700 stream_update.dst = dm_new_crtc_state->stream->dst;
8704 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8706 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8710 fill_hdr_info_packet(new_con_state, &hdr_packet);
8711 stream_update.hdr_static_metadata = &hdr_packet;
8714 status = dc_stream_get_status(dm_new_crtc_state->stream);
8716 WARN_ON(!status->plane_count);
8719 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8720 * Here we create an empty update on each plane.
8721 * To fix this, DC should permit updating only stream properties.
8723 for (j = 0; j < status->plane_count; j++)
8724 dummy_updates[j].surface = status->plane_states[0];
8727 mutex_lock(&dm->dc_lock);
8728 dc_commit_updates_for_stream(dm->dc,
8730 status->plane_count,
8731 dm_new_crtc_state->stream,
8734 mutex_unlock(&dm->dc_lock);
8737 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8738 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8739 new_crtc_state, i) {
8740 if (old_crtc_state->active && !new_crtc_state->active)
8741 crtc_disable_count++;
8743 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8744 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8746 /* For freesync config update on crtc state and params for irq */
8747 update_stream_irq_parameters(dm, dm_new_crtc_state);
8749 /* Handle vrr on->off / off->on transitions */
8750 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8755 * Enable interrupts for CRTCs that are newly enabled or went through
8756 * a modeset. It was intentionally deferred until after the front end
8757 * state was modified to wait until the OTG was on and so the IRQ
8758 * handlers didn't access stale or invalid state.
8760 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8761 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8762 #ifdef CONFIG_DEBUG_FS
8763 bool configure_crc = false;
8764 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8766 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8768 if (new_crtc_state->active &&
8769 (!old_crtc_state->active ||
8770 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8771 dc_stream_retain(dm_new_crtc_state->stream);
8772 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8773 manage_dm_interrupts(adev, acrtc, true);
8775 #ifdef CONFIG_DEBUG_FS
8777 * Frontend may have changed so reapply the CRC capture
8778 * settings for the stream.
8780 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8781 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8782 cur_crc_src = acrtc->dm_irq_params.crc_src;
8783 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8785 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8786 configure_crc = true;
8787 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8788 if (amdgpu_dm_crc_window_is_activated(crtc))
8789 configure_crc = false;
8794 amdgpu_dm_crtc_configure_crc_source(
8795 crtc, dm_new_crtc_state, cur_crc_src);
8800 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8801 if (new_crtc_state->async_flip)
8802 wait_for_vblank = false;
8804 /* update planes when needed per crtc*/
8805 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8806 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8808 if (dm_new_crtc_state->stream)
8809 amdgpu_dm_commit_planes(state, dc_state, dev,
8810 dm, crtc, wait_for_vblank);
8813 /* Update audio instances for each connector. */
8814 amdgpu_dm_commit_audio(dev, state);
8817 * Send a vblank event for all CRTC events not handled in the flip path, and
8818 * mark the event consumed for drm_atomic_helper_commit_hw_done()
8820 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8821 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8823 if (new_crtc_state->event)
8824 drm_send_event_locked(dev, &new_crtc_state->event->base);
8826 new_crtc_state->event = NULL;
8828 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8830 /* Signal HW programming completion */
8831 drm_atomic_helper_commit_hw_done(state);
8833 if (wait_for_vblank)
8834 drm_atomic_helper_wait_for_flip_done(dev, state);
8836 drm_atomic_helper_cleanup_planes(dev, state);
8838 /* Return the stolen VGA memory to VRAM */
8839 if (!adev->mman.keep_stolen_vga_memory)
8840 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8841 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8844 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8845 * so we can put the GPU into runtime suspend if we're no longer driving any displays.
8848 for (i = 0; i < crtc_disable_count; i++)
8849 pm_runtime_put_autosuspend(dev->dev);
8850 pm_runtime_mark_last_busy(dev->dev);
8853 dc_release_state(dc_state_temp);
8857 static int dm_force_atomic_commit(struct drm_connector *connector)
8860 struct drm_device *ddev = connector->dev;
8861 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8862 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8863 struct drm_plane *plane = disconnected_acrtc->base.primary;
8864 struct drm_connector_state *conn_state;
8865 struct drm_crtc_state *crtc_state;
8866 struct drm_plane_state *plane_state;
8871 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8873 /* Construct an atomic state to restore previous display setting */
8876 * Attach connectors to drm_atomic_state
8878 conn_state = drm_atomic_get_connector_state(state, connector);
8880 ret = PTR_ERR_OR_ZERO(conn_state);
8884 /* Attach crtc to drm_atomic_state */
8885 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8887 ret = PTR_ERR_OR_ZERO(crtc_state);
8891 /* force a restore */
8892 crtc_state->mode_changed = true;
8894 /* Attach plane to drm_atomic_state */
8895 plane_state = drm_atomic_get_plane_state(state, plane);
8897 ret = PTR_ERR_OR_ZERO(plane_state);
8901 /* Call commit internally with the state we just constructed */
8902 ret = drm_atomic_commit(state);
8905 drm_atomic_state_put(state);
8907 DRM_ERROR("Restoring old state failed with %i\n", ret);
8913 * This function handles all cases where a mode set does not come upon hotplug.
8914 * This includes when a display is unplugged and then plugged back into the
8915 * same port, and when running without usermode desktop manager support
8917 void dm_restore_drm_connector_state(struct drm_device *dev,
8918 struct drm_connector *connector)
8920 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8921 struct amdgpu_crtc *disconnected_acrtc;
8922 struct dm_crtc_state *acrtc_state;
8924 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8927 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8928 if (!disconnected_acrtc)
8931 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8932 if (!acrtc_state->stream)
8936 * If the previous sink is not released and is different from the current
8937 * one, we deduce that we are in a state where we cannot rely on a usermode
8938 * call to turn on the display, so we do it here
8940 if (acrtc_state->stream->sink != aconnector->dc_sink)
8941 dm_force_atomic_commit(&aconnector->base);
8945 * Grabs all modesetting locks to serialize against any blocking commits,
8946 * and waits for completion of all non-blocking commits.
8948 static int do_aquire_global_lock(struct drm_device *dev,
8949 struct drm_atomic_state *state)
8951 struct drm_crtc *crtc;
8952 struct drm_crtc_commit *commit;
8956 * Adding all modeset locks to acquire_ctx will
8957 * ensure that when the framework releases it, the
8958 * extra locks we are locking here will get released too
8960 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8964 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8965 spin_lock(&crtc->commit_lock);
8966 commit = list_first_entry_or_null(&crtc->commit_list,
8967 struct drm_crtc_commit, commit_entry);
8969 drm_crtc_commit_get(commit);
8970 spin_unlock(&crtc->commit_lock);
8976 * Make sure all pending HW programming has completed and all page flips are done.
8979 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8982 ret = wait_for_completion_interruptible_timeout(
8983 &commit->flip_done, 10*HZ);
8986 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8987 "timed out\n", crtc->base.id, crtc->name);
8989 drm_crtc_commit_put(commit);
8992 return ret < 0 ? ret : 0;
8995 static void get_freesync_config_for_crtc(
8996 struct dm_crtc_state *new_crtc_state,
8997 struct dm_connector_state *new_con_state)
8999 struct mod_freesync_config config = {0};
9000 struct amdgpu_dm_connector *aconnector =
9001 to_amdgpu_dm_connector(new_con_state->base.connector);
9002 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9003 int vrefresh = drm_mode_vrefresh(mode);
9004 bool fs_vid_mode = false;
9006 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9007 vrefresh >= aconnector->min_vfreq &&
9008 vrefresh <= aconnector->max_vfreq;
9010 if (new_crtc_state->vrr_supported) {
9011 new_crtc_state->stream->ignore_msa_timing_param = true;
9012 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9014 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9015 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9016 config.vsif_supported = true;
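/* The bounds are expressed in micro-Hz; e.g. a hypothetical 48-144 Hz panel
 * yields min/max of 48,000,000 and 144,000,000 uHz here. */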
9020 config.state = VRR_STATE_ACTIVE_FIXED;
9021 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9023 } else if (new_crtc_state->base.vrr_enabled) {
9024 config.state = VRR_STATE_ACTIVE_VARIABLE;
9026 config.state = VRR_STATE_INACTIVE;
9030 new_crtc_state->freesync_config = config;
9033 static void reset_freesync_config_for_crtc(
9034 struct dm_crtc_state *new_crtc_state)
9036 new_crtc_state->vrr_supported = false;
9038 memset(&new_crtc_state->vrr_infopacket, 0,
9039 sizeof(new_crtc_state->vrr_infopacket));
9043 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9044 struct drm_crtc_state *new_crtc_state)
9046 struct drm_display_mode old_mode, new_mode;
9048 if (!old_crtc_state || !new_crtc_state)
9051 old_mode = old_crtc_state->mode;
9052 new_mode = new_crtc_state->mode;
9054 if (old_mode.clock == new_mode.clock &&
9055 old_mode.hdisplay == new_mode.hdisplay &&
9056 old_mode.vdisplay == new_mode.vdisplay &&
9057 old_mode.htotal == new_mode.htotal &&
9058 old_mode.vtotal != new_mode.vtotal &&
9059 old_mode.hsync_start == new_mode.hsync_start &&
9060 old_mode.vsync_start != new_mode.vsync_start &&
9061 old_mode.hsync_end == new_mode.hsync_end &&
9062 old_mode.vsync_end != new_mode.vsync_end &&
9063 old_mode.hskew == new_mode.hskew &&
9064 old_mode.vscan == new_mode.vscan &&
9065 (old_mode.vsync_end - old_mode.vsync_start) ==
9066 (new_mode.vsync_end - new_mode.vsync_start))
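/* Note that the inverted (!=) checks above are deliberate: two modes qualify
 * only when everything matches except vtotal and the vsync position, with the
 * vsync width unchanged - i.e. the modes differ only in the vertical front
 * porch, which is exactly how freesync video modes vary from the base mode. */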
9072 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9073 uint64_t num, den, res;
9074 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9076 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9078 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9079 den = (unsigned long long)new_crtc_state->mode.htotal *
9080 (unsigned long long)new_crtc_state->mode.vtotal;
9082 res = div_u64(num, den);
9083 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
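/* Sanity check with hypothetical numbers: a 148500 kHz mode with htotal 2200
 * and vtotal 1125 gives 148500 * 1000 * 1000000 / (2200 * 1125)
 * = 60,000,000 uHz, i.e. a 60 Hz fixed refresh rate. */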
9086 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9087 struct drm_atomic_state *state,
9088 struct drm_crtc *crtc,
9089 struct drm_crtc_state *old_crtc_state,
9090 struct drm_crtc_state *new_crtc_state,
9092 bool *lock_and_validation_needed)
9094 struct dm_atomic_state *dm_state = NULL;
9095 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9096 struct dc_stream_state *new_stream;
9100 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9101 * update changed items
9103 struct amdgpu_crtc *acrtc = NULL;
9104 struct amdgpu_dm_connector *aconnector = NULL;
9105 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9106 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9110 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9111 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9112 acrtc = to_amdgpu_crtc(crtc);
9113 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9115 /* TODO This hack should go away */
9116 if (aconnector && enable) {
9117 /* Make sure fake sink is created in plug-in scenario */
9118 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9120 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9123 if (IS_ERR(drm_new_conn_state)) {
9124 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9128 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9129 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9131 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9134 new_stream = create_validate_stream_for_sink(aconnector,
9135 &new_crtc_state->mode,
9137 dm_old_crtc_state->stream);
9140 * We can have no stream on ACTION_SET if a display
9141 * was disconnected during S3; in this case it is not an
9142 * error: the OS will be updated after detection and
9143 * will do the right thing on the next atomic commit
9147 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9148 __func__, acrtc->base.base.id);
9154 * TODO: Check VSDB bits to decide whether this should
9155 * be enabled or not.
9157 new_stream->triggered_crtc_reset.enabled =
9158 dm->force_timing_sync;
9160 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9162 ret = fill_hdr_info_packet(drm_new_conn_state,
9163 &new_stream->hdr_static_metadata);
9168 * If we already removed the old stream from the context
9169 * (and set the new stream to NULL) then we can't reuse
9170 * the old stream even if the stream and scaling are unchanged.
9171 * We'll hit the BUG_ON and end up with a black screen.
9173 * TODO: Refactor this function to allow this check to work
9174 * in all conditions.
9176 if (amdgpu_freesync_vid_mode &&
9177 dm_new_crtc_state->stream &&
9178 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9181 if (dm_new_crtc_state->stream &&
9182 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9183 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9184 new_crtc_state->mode_changed = false;
9185 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9186 new_crtc_state->mode_changed);
9190 /* mode_changed flag may get updated above, need to check again */
9191 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9195 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9196 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9197 "connectors_changed:%d\n",
9199 new_crtc_state->enable,
9200 new_crtc_state->active,
9201 new_crtc_state->planes_changed,
9202 new_crtc_state->mode_changed,
9203 new_crtc_state->active_changed,
9204 new_crtc_state->connectors_changed);
9206 /* Remove stream for any changed/disabled CRTC */
9209 if (!dm_old_crtc_state->stream)
9212 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9213 is_timing_unchanged_for_freesync(new_crtc_state,
9215 new_crtc_state->mode_changed = false;
9217 "Mode change not required for front porch change, "
9218 "setting mode_changed to %d",
9219 new_crtc_state->mode_changed);
9221 set_freesync_fixed_config(dm_new_crtc_state);
9224 } else if (amdgpu_freesync_vid_mode && aconnector &&
9225 is_freesync_video_mode(&new_crtc_state->mode,
9227 set_freesync_fixed_config(dm_new_crtc_state);
9230 ret = dm_atomic_get_state(state, &dm_state);
9234 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9237 /* i.e. reset mode */
9238 if (dc_remove_stream_from_ctx(
9241 dm_old_crtc_state->stream) != DC_OK) {
9246 dc_stream_release(dm_old_crtc_state->stream);
9247 dm_new_crtc_state->stream = NULL;
9249 reset_freesync_config_for_crtc(dm_new_crtc_state);
9251 *lock_and_validation_needed = true;
9253 } else {/* Add stream for any updated/enabled CRTC */
9255 * Quick fix to prevent a NULL pointer dereference on new_stream when
9256 * MST connectors added in chained mode are not found in the existing crtc_state.
9257 * TODO: need to dig out the root cause of this.
9259 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9262 if (modereset_required(new_crtc_state))
9265 if (modeset_required(new_crtc_state, new_stream,
9266 dm_old_crtc_state->stream)) {
9268 WARN_ON(dm_new_crtc_state->stream);
9270 ret = dm_atomic_get_state(state, &dm_state);
9274 dm_new_crtc_state->stream = new_stream;
9276 dc_stream_retain(new_stream);
9278 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
9281 if (dc_add_stream_to_ctx(
9284 dm_new_crtc_state->stream) != DC_OK) {
9289 *lock_and_validation_needed = true;
9294 /* Release extra reference */
9296 dc_stream_release(new_stream);
9299 * We want to do dc stream updates that do not require a
9300 * full modeset below.
9302 if (!(enable && aconnector && new_crtc_state->active))
9305 * Given above conditions, the dc state cannot be NULL because:
9306 * 1. We're in the process of enabling a CRTC (the stream has just
9307 * been added to the dc context, or is already on the context),
9308 * 2. it has a valid connector attached, and
9309 * 3. it is currently active and enabled.
9310 * => The dc stream state currently exists.
9312 BUG_ON(dm_new_crtc_state->stream == NULL);
9314 /* Scaling or underscan settings */
9315 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9316 update_stream_scaling_settings(
9317 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9320 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9323 * Color management settings. We also update color properties
9324 * when a modeset is needed, to ensure it gets reprogrammed.
9326 if (dm_new_crtc_state->base.color_mgmt_changed ||
9327 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9328 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9333 /* Update Freesync settings. */
9334 get_freesync_config_for_crtc(dm_new_crtc_state,
9341 dc_stream_release(new_stream);
9345 static bool should_reset_plane(struct drm_atomic_state *state,
9346 struct drm_plane *plane,
9347 struct drm_plane_state *old_plane_state,
9348 struct drm_plane_state *new_plane_state)
9350 struct drm_plane *other;
9351 struct drm_plane_state *old_other_state, *new_other_state;
9352 struct drm_crtc_state *new_crtc_state;
9356 * TODO: Remove this hack once the checks below are sufficient
9357 * to determine when we need to reset all the planes on the CRTC.
9360 if (state->allow_modeset)
9363 /* Exit early if we know that we're adding or removing the plane. */
9364 if (old_plane_state->crtc != new_plane_state->crtc)
9367 /* old crtc == new_crtc == NULL, plane not in context. */
9368 if (!new_plane_state->crtc)
9372 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9374 if (!new_crtc_state)
9377 /* CRTC Degamma changes currently require us to recreate planes. */
9378 if (new_crtc_state->color_mgmt_changed)
9381 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9385 * If there are any new primary or overlay planes being added or
9386 * removed then the z-order can potentially change. To ensure
9387 * correct z-order and pipe acquisition the current DC architecture
9388 * requires us to remove and recreate all existing planes.
9390 * TODO: Come up with a more elegant solution for this.
9392 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9393 struct amdgpu_framebuffer *old_afb, *new_afb;
9394 if (other->type == DRM_PLANE_TYPE_CURSOR)
9397 if (old_other_state->crtc != new_plane_state->crtc &&
9398 new_other_state->crtc != new_plane_state->crtc)
9401 if (old_other_state->crtc != new_other_state->crtc)
9404 /* Src/dst size and scaling updates. */
9405 if (old_other_state->src_w != new_other_state->src_w ||
9406 old_other_state->src_h != new_other_state->src_h ||
9407 old_other_state->crtc_w != new_other_state->crtc_w ||
9408 old_other_state->crtc_h != new_other_state->crtc_h)
9411 /* Rotation / mirroring updates. */
9412 if (old_other_state->rotation != new_other_state->rotation)
9415 /* Blending updates. */
9416 if (old_other_state->pixel_blend_mode !=
9417 new_other_state->pixel_blend_mode)
9420 /* Alpha updates. */
9421 if (old_other_state->alpha != new_other_state->alpha)
9424 /* Colorspace changes. */
9425 if (old_other_state->color_range != new_other_state->color_range ||
9426 old_other_state->color_encoding != new_other_state->color_encoding)
9429 /* Framebuffer checks fall at the end. */
9430 if (!old_other_state->fb || !new_other_state->fb)
9433 /* Pixel format changes can require bandwidth updates. */
9434 if (old_other_state->fb->format != new_other_state->fb->format)
9437 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9438 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9440 /* Tiling and DCC changes also require bandwidth updates. */
9441 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9442 old_afb->base.modifier != new_afb->base.modifier)
9449 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9450 struct drm_plane_state *new_plane_state,
9451 struct drm_framebuffer *fb)
9453 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9454 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9458 if (fb->width > new_acrtc->max_cursor_width ||
9459 fb->height > new_acrtc->max_cursor_height) {
9460 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9461 new_plane_state->fb->width,
9462 new_plane_state->fb->height);
9465 if (new_plane_state->src_w != fb->width << 16 ||
9466 new_plane_state->src_h != fb->height << 16) {
9467 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9471 /* Pitch in pixels */
9472 pitch = fb->pitches[0] / fb->format->cpp[0];
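/* E.g. (hypothetical): a 64-pixel-wide ARGB8888 cursor FB has
 * pitches[0] == 256 bytes and cpp[0] == 4, giving a pitch of 64 pixels. */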
9474 if (fb->width != pitch) {
9475 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9484 /* FB pitch is supported by cursor plane */
9487 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9491 /* Core DRM takes care of checking FB modifiers, so we only need to
9492 * check tiling flags when the FB doesn't have a modifier. */
9493 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9494 if (adev->family < AMDGPU_FAMILY_AI) {
9495 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9496 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9497 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9499 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9502 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9510 static int dm_update_plane_state(struct dc *dc,
9511 struct drm_atomic_state *state,
9512 struct drm_plane *plane,
9513 struct drm_plane_state *old_plane_state,
9514 struct drm_plane_state *new_plane_state,
9516 bool *lock_and_validation_needed)
9519 struct dm_atomic_state *dm_state = NULL;
9520 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9521 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9522 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9523 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9524 struct amdgpu_crtc *new_acrtc;
9529 new_plane_crtc = new_plane_state->crtc;
9530 old_plane_crtc = old_plane_state->crtc;
9531 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9532 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9534 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9535 if (!enable || !new_plane_crtc ||
9536 drm_atomic_plane_disabling(plane->state, new_plane_state))
9539 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9541 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9542 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9546 if (new_plane_state->fb) {
9547 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9548 new_plane_state->fb);
9556 needs_reset = should_reset_plane(state, plane, old_plane_state,
9559 /* Remove any changed/removed planes */
9564 if (!old_plane_crtc)
9567 old_crtc_state = drm_atomic_get_old_crtc_state(
9568 state, old_plane_crtc);
9569 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9571 if (!dm_old_crtc_state->stream)
9574 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9575 plane->base.id, old_plane_crtc->base.id);
9577 ret = dm_atomic_get_state(state, &dm_state);
9581 if (!dc_remove_plane_from_context(
9583 dm_old_crtc_state->stream,
9584 dm_old_plane_state->dc_state,
9585 dm_state->context)) {
9591 dc_plane_state_release(dm_old_plane_state->dc_state);
9592 dm_new_plane_state->dc_state = NULL;
9594 *lock_and_validation_needed = true;
9596 } else { /* Add new planes */
9597 struct dc_plane_state *dc_new_plane_state;
9599 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9602 if (!new_plane_crtc)
9605 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9606 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9608 if (!dm_new_crtc_state->stream)
9614 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9618 WARN_ON(dm_new_plane_state->dc_state);
9620 dc_new_plane_state = dc_create_plane_state(dc);
9621 if (!dc_new_plane_state)
9624 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9625 plane->base.id, new_plane_crtc->base.id);
9627 ret = fill_dc_plane_attributes(
9628 drm_to_adev(new_plane_crtc->dev),
9633 dc_plane_state_release(dc_new_plane_state);
9637 ret = dm_atomic_get_state(state, &dm_state);
9639 dc_plane_state_release(dc_new_plane_state);
9644 * Any atomic check errors that occur after this will
9645 * not need a release. The plane state will be attached
9646 * to the stream, and therefore part of the atomic
9647 * state. It'll be released when the atomic state is
9650 if (!dc_add_plane_to_context(
9652 dm_new_crtc_state->stream,
9654 dm_state->context)) {
9656 dc_plane_state_release(dc_new_plane_state);
9660 dm_new_plane_state->dc_state = dc_new_plane_state;
9662 /* Tell DC to do a full surface update every time there
9663 * is a plane change. Inefficient, but works for now.
9665 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9667 *lock_and_validation_needed = true;
9674 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9675 struct drm_crtc *crtc,
9676 struct drm_crtc_state *new_crtc_state)
9678 struct drm_plane_state *new_cursor_state, *new_primary_state;
9679 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9681 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9682 * cursor per pipe but it's going to inherit the scaling and
9683 * positioning from the underlying pipe. Check that the cursor plane's
9684 * scaling matches the primary plane's. */
9686 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9687 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9688 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9692 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9693 (new_cursor_state->src_w >> 16);
9694 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9695 (new_cursor_state->src_h >> 16);
9697 primary_scale_w = new_primary_state->crtc_w * 1000 /
9698 (new_primary_state->src_w >> 16);
9699 primary_scale_h = new_primary_state->crtc_h * 1000 /
9700 (new_primary_state->src_h >> 16);
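/* Worked example (hypothetical numbers): a 64x64 cursor shown at 64x64 has
 * scale 1000 (1:1), while a 1920x1080 primary scanned out at 3840x2160 has
 * scale 2000 (2:1); that mismatch is rejected below. */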
9702 if (cursor_scale_w != primary_scale_w ||
9703 cursor_scale_h != primary_scale_h) {
9704 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9711 #if defined(CONFIG_DRM_AMD_DC_DCN)
9712 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9714 struct drm_connector *connector;
9715 struct drm_connector_state *conn_state;
9716 struct amdgpu_dm_connector *aconnector = NULL;
9718 for_each_new_connector_in_state(state, connector, conn_state, i) {
9719 if (conn_state->crtc != crtc)
9722 aconnector = to_amdgpu_dm_connector(connector);
9723 if (!aconnector->port || !aconnector->mst_port)
9732 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9737 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9738 * @dev: The DRM device
9739 * @state: The atomic state to commit
9741 * Validate that the given atomic state is programmable by DC into hardware.
9742 * This involves constructing a &struct dc_state reflecting the new hardware
9743 * state we wish to commit, then querying DC to see if it is programmable. It's
9744 * important not to modify the existing DC state. Otherwise, atomic_check
9745 * may unexpectedly commit hardware changes.
9747 * When validating the DC state, it's important that the right locks are
9748 * acquired. For full updates case which removes/adds/updates streams on one
9749 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9750 * that any such full update commit will wait for completion of any outstanding
9751 * flip using DRMs synchronization events.
9753 * Note that DM adds the affected connectors for all CRTCs in state, when that
9754 * might not seem necessary. This is because DC stream creation requires the
9755 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9756 * be possible but non-trivial - a possible TODO item.
9758 * Return: Negative error code if validation failed, 0 otherwise.
9760 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9761 struct drm_atomic_state *state)
9763 struct amdgpu_device *adev = drm_to_adev(dev);
9764 struct dm_atomic_state *dm_state = NULL;
9765 struct dc *dc = adev->dm.dc;
9766 struct drm_connector *connector;
9767 struct drm_connector_state *old_con_state, *new_con_state;
9768 struct drm_crtc *crtc;
9769 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9770 struct drm_plane *plane;
9771 struct drm_plane_state *old_plane_state, *new_plane_state;
9772 enum dc_status status;
9774 bool lock_and_validation_needed = false;
9775 struct dm_crtc_state *dm_old_crtc_state;
9777 trace_amdgpu_dm_atomic_check_begin(state);
9779 ret = drm_atomic_helper_check_modeset(dev, state);
9783 /* Check connector changes */
9784 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9785 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9786 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9788 /* Skip connectors that are disabled or part of modeset already. */
9789 if (!old_con_state->crtc && !new_con_state->crtc)
9792 if (!new_con_state->crtc)
9795 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9796 if (IS_ERR(new_crtc_state)) {
9797 ret = PTR_ERR(new_crtc_state);
9801 if (dm_old_con_state->abm_level !=
9802 dm_new_con_state->abm_level)
9803 new_crtc_state->connectors_changed = true;
9806 #if defined(CONFIG_DRM_AMD_DC_DCN)
9807 if (dc_resource_is_dsc_encoding_supported(dc)) {
9808 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9809 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9810 ret = add_affected_mst_dsc_crtcs(state, crtc);
9817 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9818 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9820 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9821 !new_crtc_state->color_mgmt_changed &&
9822 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9823 dm_old_crtc_state->dsc_force_changed == false)
9826 if (!new_crtc_state->enable)
9829 ret = drm_atomic_add_affected_connectors(state, crtc);
9833 ret = drm_atomic_add_affected_planes(state, crtc);
9837 if (dm_old_crtc_state->dsc_force_changed)
9838 new_crtc_state->mode_changed = true;
9842 * Add all primary and overlay planes on the CRTC to the state
9843 * whenever a plane is enabled to maintain correct z-ordering
9844 * and to enable fast surface updates.
9846 drm_for_each_crtc(crtc, dev) {
9847 bool modified = false;
9849 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9850 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9853 if (new_plane_state->crtc == crtc ||
9854 old_plane_state->crtc == crtc) {
9863 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9864 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9868 drm_atomic_get_plane_state(state, plane);
9870 if (IS_ERR(new_plane_state)) {
9871 ret = PTR_ERR(new_plane_state);
9877 /* Remove existing planes if they are modified */
9878 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9879 ret = dm_update_plane_state(dc, state, plane,
9883 &lock_and_validation_needed);
9888 /* Disable all crtcs which require disable */
9889 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9890 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9894 &lock_and_validation_needed);
9899 /* Enable all crtcs which require enable */
9900 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9901 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9905 &lock_and_validation_needed);
9910 /* Add new/modified planes */
9911 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9912 ret = dm_update_plane_state(dc, state, plane,
9916 &lock_and_validation_needed);
9921 /* Run this here since we want to validate the streams we created */
9922 ret = drm_atomic_helper_check_planes(dev, state);
9926 /* Check cursor planes scaling */
9927 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9928 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9933 if (state->legacy_cursor_update) {
9935 * This is a fast cursor update coming from the plane update
9936 * helper; check whether it can be done asynchronously for better performance.
9939 state->async_update =
9940 !drm_atomic_helper_async_check(dev, state);
9943 * Skip the remaining global validation if this is an async
9944 * update. Cursor updates can be done without affecting
9945 * state or bandwidth calcs and this avoids the performance
9946 * penalty of locking the private state object and
9947 * allocating a new dc_state.
9949 if (state->async_update)
9953 /* Check scaling and underscan changes */
9954 /* TODO: Scaling-changes validation was removed due to the inability to commit
9955 * a new stream into the context w/o causing a full reset. Need to
9956 * decide how to handle this.
9958 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9959 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9960 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9961 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9963 /* Skip any modesets/resets */
9964 if (!acrtc || drm_atomic_crtc_needs_modeset(
9965 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9968 /* Skip anything that is not a scaling or underscan change */
9969 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9972 lock_and_validation_needed = true;
9976 * Streams and planes are reset when there are changes that affect
9977 * bandwidth. Anything that affects bandwidth needs to go through
9978 * DC global validation to ensure that the configuration can be applied to hardware.
9981 * We currently have to stall out here in atomic_check for outstanding
9982 * commits to finish in this case because our IRQ handlers reference
9983 * DRM state directly - we can end up disabling interrupts too early otherwise.
9986 * TODO: Remove this stall and drop DM state private objects.
9988 if (lock_and_validation_needed) {
9989 ret = dm_atomic_get_state(state, &dm_state);
9993 ret = do_aquire_global_lock(dev, state);
9997 #if defined(CONFIG_DRM_AMD_DC_DCN)
9998 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10001 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10007 * Perform validation of MST topology in the state:
10008 * We need to perform MST atomic check before calling
10009 * dc_validate_global_state(), or there is a chance
10010 * of getting stuck in an infinite loop and hanging eventually.
10012 ret = drm_dp_mst_atomic_check(state);
10015 status = dc_validate_global_state(dc, dm_state->context, false);
10016 if (status != DC_OK) {
10017 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10018 dc_status_to_str(status), status);
10024 * The commit is a fast update. Fast updates shouldn't change
10025 * the DC context, affect global validation, and can have their
10026 * commit work done in parallel with other commits not touching
10027 * the same resource. If we have a new DC context as part of
10028 * the DM atomic state from validation we need to free it and
10029 * retain the existing one instead.
10031 * Furthermore, since the DM atomic state only contains the DC
10032 * context and can safely be annulled, we can free the state
10033 * and clear the associated private object now to free
10034 * some memory and avoid a possible use-after-free later.
10037 for (i = 0; i < state->num_private_objs; i++) {
10038 struct drm_private_obj *obj = state->private_objs[i].ptr;
10040 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10041 int j = state->num_private_objs-1;
10043 dm_atomic_destroy_state(obj,
10044 state->private_objs[i].state);
10046 /* If i is not at the end of the array then the
10047 * last element needs to be moved to where i was
10048 * before the array can safely be truncated.
10051 state->private_objs[i] =
10052 state->private_objs[j];
10054 state->private_objs[j].ptr = NULL;
10055 state->private_objs[j].state = NULL;
10056 state->private_objs[j].old_state = NULL;
10057 state->private_objs[j].new_state = NULL;
10059 state->num_private_objs = j;
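/* In other words, a swap-with-last removal: removing B from [A, B, C, D]
 * yields [A, D, C] with num_private_objs dropping from 4 to 3. */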
10065 /* Store the overall update type for use later in atomic check. */
10066 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10067 struct dm_crtc_state *dm_new_crtc_state =
10068 to_dm_crtc_state(new_crtc_state);
10070 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10075 /* Must be success */
10078 trace_amdgpu_dm_atomic_check_finish(state, ret);
10083 if (ret == -EDEADLK)
10084 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10085 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10086 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10088 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10090 trace_amdgpu_dm_atomic_check_finish(state, ret);
10095 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10096 struct amdgpu_dm_connector *amdgpu_dm_connector)
10099 bool capable = false;
10101 if (amdgpu_dm_connector->dc_link &&
10102 dm_helpers_dp_read_dpcd(
10104 amdgpu_dm_connector->dc_link,
10105 DP_DOWN_STREAM_PORT_COUNT,
10107 sizeof(dpcd_data))) {
10108 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
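/* DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT indicates the sink
 * can ignore the MSA video timing parameters, which is a prerequisite for
 * driving the panel with a variable refresh rate. */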
10114 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10115 uint8_t *edid_ext, int len,
10116 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10119 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10120 struct dc *dc = adev->dm.dc;
10122 /* send extension block to DMCU for parsing */
10123 for (i = 0; i < len; i += 8) {
10127 /* send 8 bytes at a time */
10128 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10132 /* EDID block transfer completed, expect a result */
10133 int version, min_rate, max_rate;
10135 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10137 /* amd vsdb found */
10138 vsdb_info->freesync_supported = 1;
10139 vsdb_info->amd_vsdb_version = version;
10140 vsdb_info->min_refresh_rate_hz = min_rate;
10141 vsdb_info->max_refresh_rate_hz = max_rate;
10149 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10157 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10158 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10160 uint8_t *edid_ext = NULL;
10162 bool valid_vsdb_found = false;
10164 /*----- drm_find_cea_extension() -----*/
10165 /* No EDID or EDID extensions */
10166 if (edid == NULL || edid->extensions == 0)
10169 /* Find CEA extension */
10170 for (i = 0; i < edid->extensions; i++) {
10171 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10172 if (edid_ext[0] == CEA_EXT)
10176 if (i == edid->extensions)
10179 /*----- cea_db_offsets() -----*/
10180 if (edid_ext[0] != CEA_EXT)
10183 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10185 return valid_vsdb_found ? i : -ENODEV;
10188 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10192 struct detailed_timing *timing;
10193 struct detailed_non_pixel *data;
10194 struct detailed_data_monitor_range *range;
10195 struct amdgpu_dm_connector *amdgpu_dm_connector =
10196 to_amdgpu_dm_connector(connector);
10197 struct dm_connector_state *dm_con_state = NULL;
10199 struct drm_device *dev = connector->dev;
10200 struct amdgpu_device *adev = drm_to_adev(dev);
10201 bool freesync_capable = false;
10202 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10204 if (!connector->state) {
10205 DRM_ERROR("%s - Connector has no state", __func__);
10210 dm_con_state = to_dm_connector_state(connector->state);
10212 amdgpu_dm_connector->min_vfreq = 0;
10213 amdgpu_dm_connector->max_vfreq = 0;
10214 amdgpu_dm_connector->pixel_clock_mhz = 0;
10219 dm_con_state = to_dm_connector_state(connector->state);
10221 if (!amdgpu_dm_connector->dc_sink) {
10222 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10225 if (!adev->dm.freesync_module)
10229 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10230 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10231 bool edid_check_required = false;
10234 edid_check_required = is_dp_capable_without_timing_msa(
10236 amdgpu_dm_connector);
10239 if (edid_check_required && (edid->version > 1 ||
10240 (edid->version == 1 && edid->revision > 1))) {
10241 for (i = 0; i < 4; i++) {
10243 timing = &edid->detailed_timings[i];
10244 data = &timing->data.other_data;
10245 range = &data->data.range;
10247 * Check if monitor has continuous frequency mode
10249 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10252 * Check for flag range limits only. If flag == 1 then
10253 * no additional timing information is provided.
10254 * Default GTF, GTF Secondary curve and CVT are not supported.
10257 if (range->flags != 1)
10260 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10261 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10262 amdgpu_dm_connector->pixel_clock_mhz =
10263 range->pixel_clock_mhz * 10;
10265 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10266 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10271 if (amdgpu_dm_connector->max_vfreq -
10272 amdgpu_dm_connector->min_vfreq > 10) {
10274 freesync_capable = true;
10277 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10278 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10279 if (i >= 0 && vsdb_info.freesync_supported) {
10280 timing = &edid->detailed_timings[i];
10281 data = &timing->data.other_data;
10283 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10284 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10285 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10286 freesync_capable = true;
10288 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10289 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10295 dm_con_state->freesync_capable = freesync_capable;
10297 if (connector->vrr_capable_property)
10298 drm_connector_set_vrr_capable_property(connector,
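/*
 * Note on the max_vfreq - min_vfreq > 10 checks above: the connector is
 * only reported as VRR/FreeSync capable when the panel's supported
 * refresh window is wider than 10 Hz (for example 40-60 Hz qualifies,
 * while a nearly fixed 58-62 Hz window does not), since a narrower
 * range leaves little useful room for variable-refresh operation.
 */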
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
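/*
 * For reference: DP_PSR_SUPPORT (DPCD address 0x070) is the sink's PSR
 * capability/version field. A value of 0 means the sink reports no PSR
 * support; any non-zero value is treated above as PSR version 1, even
 * if the sink advertises a newer PSR revision.
 */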
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
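/*
 * The psr_config constants above are link-timing parameters handed to
 * DC: psr_rfb_setup_time is the remote-frame-buffer setup time and
 * psr_sdp_transmit_line_num_deadline the line deadline for transmitting
 * the PSR SDP. The specific values are fixed hardware tuning numbers
 * rather than quantities derived at runtime.
 */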
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate number of static frames before generating interrupt to
	 * enter PSR.
	 */
	// Init fail safe of 2 frames static
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	/* Refresh rate in Hz = pixel clock / total pixels per frame */
	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/* Round up.
	 * Calculate number of frames such that at least 30 ms of time has
	 * passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
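	/*
	 * Worked example: at 60 Hz, frame_time_microsec = 1000000 / 60
	 * = 16666 us, so num_frames_static = 30000 / 16666 + 1 = 2;
	 * at 144 Hz it is 30000 / 6944 + 1 = 5. The "+ 1" rounds up so
	 * the static interval always spans at least 30 ms.
	 */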
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}
/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w if psr is enabled on any
 * stream
 * @dm: display manager state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");

	return dc_set_psr_allow_active(dm->dc, false);
}
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}
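/*
 * The function above propagates the driver's force_timing_sync flag to
 * every stream in the current DC state while holding dc_lock, then asks
 * DC to re-run per-frame CRTC master synchronization so the affected
 * CRTCs are retimed together.
 */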
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}
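/*
 * Reads are rejected above while the DMUB register helper is gathering
 * writes for offload (unless it is going to burst-write them out first),
 * presumably because a read cannot be deferred into the gathered command
 * buffer and would therefore observe state that does not yet include the
 * pending writes; ASSERT(false) flags such call sites during development.
 */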